4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * DSP/BIOS Bridge dynamic + overlay Node loader.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
19 #include <linux/types.h>
21 #include <dspbridge/host_os.h>
23 #include <dspbridge/dbdefs.h>
25 /* Platform manager */
26 #include <dspbridge/cod.h>
27 #include <dspbridge/dev.h>
29 /* Resource manager */
30 #include <dspbridge/dbll.h>
31 #include <dspbridge/dbdcd.h>
32 #include <dspbridge/rmm.h>
33 #include <dspbridge/uuidutil.h>
35 #include <dspbridge/nldr.h>
36 #include <linux/lcm.h>
38 /* Name of section containing dynamic load mem */
39 #define DYNMEMSECT ".dspbridge_mem"
41 /* Name of section containing dependent library information */
42 #define DEPLIBSECT ".dspbridge_deplibs"
44 /* Max depth of recursion for loading node's dependent libraries */
47 /* Max number of persistent libraries kept by a node */
51 * Defines for extracting packed dynamic load memory requirements from two
53 * These defines must match node.cdb and dynm.cdb
54 * Format of data/code mask is:
55 * uuuuuuuu|fueeeeee|fudddddd|fucccccc|
58 * cccccc = preferred/required dynamic mem segid for create phase data/code
59 * dddddd = preferred/required dynamic mem segid for delete phase data/code
60 * eeeeee = preferred/req. dynamic mem segid for execute phase data/code
61 * f = flag indicating if memory is preferred or required:
62 * f = 1 if required, f = 0 if preferred.
64 * The 6 bits of the segid are interpreted as follows:
66 * If the 6th bit (bit 5) is not set, then this specifies a memory segment
67 * between 0 and 31 (a maximum of 32 dynamic loading memory segments).
68 * If the 6th bit (bit 5) is set, segid has the following interpretation:
69 * segid = 32 - Any internal memory segment can be used.
70 * segid = 33 - Any external memory segment can be used.
71 * segid = 63 - Any memory segment can be used (in this case the
72 * required/preferred flag is irrelevant).
75 /* Maximum allowed dynamic loading memory segments */
78 #define MAXSEGID 3 /* Largest possible (real) segid */
79 #define MEMINTERNALID 32 /* Segid meaning use internal mem */
80 #define MEMEXTERNALID 33 /* Segid meaning use external mem */
81 #define NULLID 63 /* Segid meaning no memory req/pref */
82 #define FLAGBIT 7 /* 7th bit is pref./req. flag */
83 #define SEGMASK 0x3f /* Bits 0 - 5 */
85 #define CREATEBIT 0 /* Create segid starts at bit 0 */
86 #define DELETEBIT 8 /* Delete segid starts at bit 8 */
87 #define EXECUTEBIT 16 /* Execute segid starts at bit 16 */
90 * Masks that define memory type. Must match defines in dynm.cdb.
94 #define DYNM_CODEDATA (DYNM_CODE | DYNM_DATA)
95 #define DYNM_INTERNAL 0x8
96 #define DYNM_EXTERNAL 0x10
99 * Defines for packing memory requirement/preference flags for code and
100 * data of each of the node's phases into one mask.
101 * The bit is set if the segid is required for loading code/data of the
102 * given phase. The bit is not set, if the segid is preferred only.
104 * These defines are also used as indeces into a segid array for the node.
105 * eg node's segid[CREATEDATAFLAGBIT] is the memory segment id that the
106 * create phase data is required or preferred to be loaded into.
108 #define CREATEDATAFLAGBIT 0
109 #define CREATECODEFLAGBIT 1
110 #define EXECUTEDATAFLAGBIT 2
111 #define EXECUTECODEFLAGBIT 3
112 #define DELETEDATAFLAGBIT 4
113 #define DELETECODEFLAGBIT 5
117 * These names may be embedded in overlay sections to identify which
118 * node phase the section should be overlayed.
120 #define PCREATE "create"
121 #define PDELETE "delete"
122 #define PEXECUTE "execute"
124 static inline bool is_equal_uuid(struct dsp_uuid
*uuid1
,
125 struct dsp_uuid
*uuid2
)
127 return !memcmp(uuid1
, uuid2
, sizeof(struct dsp_uuid
));
131 * ======== mem_seg_info ========
132 * Format of dynamic loading memory segment info in coff file.
133 * Must match dynm.h55.
135 struct mem_seg_info
{
136 u32 segid
; /* Dynamic loading memory segment number */
/* NOTE(review): fields appear to be missing here (original lines 137-138).
 * Later code reads (mem_info_obj + i)->base and (mem_info_obj + i)->len,
 * so this struct presumably also declares u32 base and u32 len between
 * segid and type -- confirm against the unmangled source. */
139 u32 type
; /* Mask of DYNM_CODE, DYNM_INTERNAL, etc. */
143 * ======== lib_node ========
144 * For maintaining a tree of library dependencies.
/* NOTE(review): the "struct lib_node {" opener (original line ~146) is
 * missing from this extraction; the members below are the struct body. */
147 struct dbll_library_obj
*lib
; /* The library */
148 u16 dep_libs
; /* Number of dependent libraries */
/* Array of dep_libs child nodes, each itself a lib_node subtree. */
149 struct lib_node
*dep_libs_tree
; /* Dependent libraries of lib */
153 * ======== ovly_sect ========
154 * Information needed to overlay a section.
/* NOTE(review): struct opener/closer lost in extraction; singly-linked
 * list node -- next_sect chains sections for one node phase. */
157 struct ovly_sect
*next_sect
;
158 u32 sect_load_addr
; /* Load address of section */
159 u32 sect_run_addr
; /* Run address of section */
160 u32 size
; /* Size of section */
161 u16 page
; /* DBL_CODE, DBL_DATA */
165 * ======== ovly_node ========
166 * For maintaining a list of overlay nodes, with sections that need to be
167 * overlayed for each of the nodes phases.
/* NOTE(review): struct opener and several members (node_name, per-phase
 * section/ref counters referenced by later code, e.g. create_sects,
 * other_ref) are missing from this extraction -- confirm upstream. */
170 struct dsp_uuid uuid
;
/* One ovly_sect list per node phase, plus an "other" catch-all list. */
172 struct ovly_sect
*create_sects_list
;
173 struct ovly_sect
*delete_sects_list
;
174 struct ovly_sect
*execute_sects_list
;
175 struct ovly_sect
*other_sects_list
;
187 * ======== nldr_object ========
188 * Overlay loader object.
/* NOTE(review): struct opener/closer lost in extraction. This is the
 * per-device loader state created by nldr_create(). */
191 struct dev_object
*dev_obj
; /* Device object */
192 struct dcd_manager
*dcd_mgr
; /* Proc/Node data manager */
193 struct dbll_tar_obj
*dbll
; /* The DBL loader */
194 struct dbll_library_obj
*base_lib
; /* Base image library */
195 struct rmm_target_obj
*rmm
; /* Remote memory manager for DSP */
196 struct dbll_fxns ldr_fxns
; /* Loader function table */
197 struct dbll_attrs ldr_attrs
; /* attrs to pass to loader functions */
198 nldr_ovlyfxn ovly_fxn
; /* "write" for overlay nodes */
199 nldr_writefxn write_fxn
; /* "write" for dynamic nodes */
200 struct ovly_node
*ovly_table
; /* Table of overlay nodes */
201 u16 ovly_nodes
; /* Number of overlay nodes in base */
202 u16 ovly_nid
; /* Index for tracking overlay nodes */
203 u16 dload_segs
; /* Number of dynamic load mem segs */
204 u32
*seg_table
; /* memtypes of dynamic memory segs
/* (comment continuation lost in extraction; seg_table is allocated as
 * dload_segs u32 entries in nldr_create() and filled from the base
 * library's DYNMEMSECT section) */
207 u16 dsp_mau_size
; /* Size of DSP MAU */
208 u16 dsp_word_size
; /* Size of DSP word */
212 * ======== nldr_nodeobject ========
213 * Dynamic node object. This object is created when a node is allocated.
215 struct nldr_nodeobject
{
216 struct nldr_object
*nldr_obj
; /* Dynamic loader handle */
217 void *priv_ref
; /* Handle to pass to dbl_write_fxn */
218 struct dsp_uuid uuid
; /* Node's UUID */
219 bool dynamic
; /* Dynamically loaded node? */
220 bool overlay
; /* Overlay node? */
221 bool *phase_split
; /* Multiple phase libraries? */
222 struct lib_node root
; /* Library containing node phase */
223 struct lib_node create_lib
; /* Library with create phase lib */
224 struct lib_node execute_lib
; /* Library with execute phase lib */
225 struct lib_node delete_lib
; /* Library with delete phase lib */
226 /* libs remain loaded until Delete */
227 struct lib_node pers_lib_table
[MAXLIBS
];
228 s32 pers_libs
; /* Number of persistent libraries */
229 /* Path in lib dependency tree */
230 struct dbll_library_obj
*lib_path
[MAXDEPTH
+ 1];
231 enum nldr_phase phase
; /* Node phase currently being loaded */
/* NOTE(review): MAXLIBS/MAXDEPTH/MAXFLAGS #define values are missing
 * from this extraction (the macro-definition lines were dropped). */
234 * Dynamic loading memory segments for data and code of each phase.
236 u16 seg_id
[MAXFLAGS
];
239 * Mask indicating whether each mem segment specified in seg_id[]
240 * is preferred or required.
242 * if (code_data_flag_mask & (1 << EXECUTEDATAFLAGBIT)) != 0,
243 * then it is required to load execute phase data into the memory
244 * specified by seg_id[EXECUTEDATAFLAGBIT].
246 u32 code_data_flag_mask
;
249 /* Dynamic loader function table */
/* Static dispatch table binding the generic dbll_fxns interface to the
 * concrete dbll_* implementations; copied into nldr_obj->ldr_fxns at
 * create time so attrs can be customized per loader instance.
 * NOTE(review): the closing "};" and possibly trailing entries are
 * missing from this extraction. */
250 static struct dbll_fxns ldr_fxns
= {
251 (dbll_close_fxn
) dbll_close
,
252 (dbll_create_fxn
) dbll_create
,
253 (dbll_delete_fxn
) dbll_delete
,
254 (dbll_exit_fxn
) dbll_exit
,
255 (dbll_get_attrs_fxn
) dbll_get_attrs
,
256 (dbll_get_addr_fxn
) dbll_get_addr
,
257 (dbll_get_c_addr_fxn
) dbll_get_c_addr
,
258 (dbll_get_sect_fxn
) dbll_get_sect
,
259 (dbll_init_fxn
) dbll_init
,
260 (dbll_load_fxn
) dbll_load
,
261 (dbll_open_fxn
) dbll_open
,
262 (dbll_read_sect_fxn
) dbll_read_sect
,
263 (dbll_unload_fxn
) dbll_unload
,
266 static int add_ovly_info(void *handle
, struct dbll_sect_info
*sect_info
,
267 u32 addr
, u32 bytes
);
268 static int add_ovly_node(struct dsp_uuid
*uuid_obj
,
269 enum dsp_dcdobjtype obj_type
, void *handle
);
270 static int add_ovly_sect(struct nldr_object
*nldr_obj
,
271 struct ovly_sect
**lst
,
272 struct dbll_sect_info
*sect_inf
,
273 bool *exists
, u32 addr
, u32 bytes
);
274 static s32
fake_ovly_write(void *handle
, u32 dsp_address
, void *buf
, u32 bytes
,
276 static void free_sects(struct nldr_object
*nldr_obj
,
277 struct ovly_sect
*phase_sects
, u16 alloc_num
);
278 static bool get_symbol_value(void *handle
, void *parg
, void *rmm_handle
,
279 char *sym_name
, struct dbll_sym_val
**sym
);
280 static int load_lib(struct nldr_nodeobject
*nldr_node_obj
,
281 struct lib_node
*root
, struct dsp_uuid uuid
,
283 struct dbll_library_obj
**lib_path
,
284 enum nldr_phase phase
, u16 depth
);
285 static int load_ovly(struct nldr_nodeobject
*nldr_node_obj
,
286 enum nldr_phase phase
);
287 static int remote_alloc(void **ref
, u16 mem_sect
, u32 size
,
288 u32 align
, u32
*dsp_address
,
290 s32 req
, bool reserve
);
291 static int remote_free(void **ref
, u16 space
, u32 dsp_address
, u32 size
,
294 static void unload_lib(struct nldr_nodeobject
*nldr_node_obj
,
295 struct lib_node
*root
);
296 static void unload_ovly(struct nldr_nodeobject
*nldr_node_obj
,
297 enum nldr_phase phase
);
298 static bool find_in_persistent_lib_array(struct nldr_nodeobject
*nldr_node_obj
,
299 struct dbll_library_obj
*lib
);
302 * ======== nldr_allocate ========
304 int nldr_allocate(struct nldr_object
*nldr_obj
, void *priv_ref
,
305 const struct dcd_nodeprops
*node_props
,
306 struct nldr_nodeobject
**nldr_nodeobj
,
307 bool *pf_phase_split
)
309 struct nldr_nodeobject
*nldr_node_obj
= NULL
;
312 /* Initialize handle in case of failure */
313 *nldr_nodeobj
= NULL
;
314 /* Allocate node object */
315 nldr_node_obj
= kzalloc(sizeof(struct nldr_nodeobject
), GFP_KERNEL
);
317 if (nldr_node_obj
== NULL
) {
320 nldr_node_obj
->phase_split
= pf_phase_split
;
321 nldr_node_obj
->pers_libs
= 0;
322 nldr_node_obj
->nldr_obj
= nldr_obj
;
323 nldr_node_obj
->priv_ref
= priv_ref
;
324 /* Save node's UUID. */
325 nldr_node_obj
->uuid
= node_props
->ndb_props
.ui_node_id
;
327 * Determine if node is a dynamically loaded node from
330 if (node_props
->load_type
== NLDR_DYNAMICLOAD
) {
332 nldr_node_obj
->dynamic
= true;
334 * Extract memory requirements from ndb_props masks
337 nldr_node_obj
->seg_id
[CREATEDATAFLAGBIT
] = (u16
)
338 (node_props
->data_mem_seg_mask
>> CREATEBIT
) &
340 nldr_node_obj
->code_data_flag_mask
|=
341 ((node_props
->data_mem_seg_mask
>>
342 (CREATEBIT
+ FLAGBIT
)) & 1) << CREATEDATAFLAGBIT
;
343 nldr_node_obj
->seg_id
[CREATECODEFLAGBIT
] = (u16
)
344 (node_props
->code_mem_seg_mask
>>
345 CREATEBIT
) & SEGMASK
;
346 nldr_node_obj
->code_data_flag_mask
|=
347 ((node_props
->code_mem_seg_mask
>>
348 (CREATEBIT
+ FLAGBIT
)) & 1) << CREATECODEFLAGBIT
;
350 nldr_node_obj
->seg_id
[EXECUTEDATAFLAGBIT
] = (u16
)
351 (node_props
->data_mem_seg_mask
>>
352 EXECUTEBIT
) & SEGMASK
;
353 nldr_node_obj
->code_data_flag_mask
|=
354 ((node_props
->data_mem_seg_mask
>>
355 (EXECUTEBIT
+ FLAGBIT
)) & 1) <<
357 nldr_node_obj
->seg_id
[EXECUTECODEFLAGBIT
] = (u16
)
358 (node_props
->code_mem_seg_mask
>>
359 EXECUTEBIT
) & SEGMASK
;
360 nldr_node_obj
->code_data_flag_mask
|=
361 ((node_props
->code_mem_seg_mask
>>
362 (EXECUTEBIT
+ FLAGBIT
)) & 1) <<
365 nldr_node_obj
->seg_id
[DELETEDATAFLAGBIT
] = (u16
)
366 (node_props
->data_mem_seg_mask
>> DELETEBIT
) &
368 nldr_node_obj
->code_data_flag_mask
|=
369 ((node_props
->data_mem_seg_mask
>>
370 (DELETEBIT
+ FLAGBIT
)) & 1) << DELETEDATAFLAGBIT
;
371 nldr_node_obj
->seg_id
[DELETECODEFLAGBIT
] = (u16
)
372 (node_props
->code_mem_seg_mask
>>
373 DELETEBIT
) & SEGMASK
;
374 nldr_node_obj
->code_data_flag_mask
|=
375 ((node_props
->code_mem_seg_mask
>>
376 (DELETEBIT
+ FLAGBIT
)) & 1) << DELETECODEFLAGBIT
;
378 /* Non-dynamically loaded nodes are part of the
380 nldr_node_obj
->root
.lib
= nldr_obj
->base_lib
;
381 /* Check for overlay node */
382 if (node_props
->load_type
== NLDR_OVLYLOAD
)
383 nldr_node_obj
->overlay
= true;
386 *nldr_nodeobj
= (struct nldr_nodeobject
*)nldr_node_obj
;
388 /* Cleanup on failure */
389 if (status
&& nldr_node_obj
)
390 kfree(nldr_node_obj
);
396 * ======== nldr_create ========
398 int nldr_create(struct nldr_object
**nldr
,
399 struct dev_object
*hdev_obj
,
400 const struct nldr_attrs
*pattrs
)
402 struct cod_manager
*cod_mgr
; /* COD manager */
403 char *psz_coff_buf
= NULL
;
404 char sz_zl_file
[COD_MAXPATHLENGTH
];
405 struct nldr_object
*nldr_obj
= NULL
;
406 struct dbll_attrs save_attrs
;
407 struct dbll_attrs new_attrs
;
411 struct mem_seg_info
*mem_info_obj
;
414 struct rmm_segment
*rmm_segs
= NULL
;
418 /* Allocate dynamic loader object */
419 nldr_obj
= kzalloc(sizeof(struct nldr_object
), GFP_KERNEL
);
421 nldr_obj
->dev_obj
= hdev_obj
;
422 /* warning, lazy status checking alert! */
423 dev_get_cod_mgr(hdev_obj
, &cod_mgr
);
425 status
= cod_get_loader(cod_mgr
, &nldr_obj
->dbll
);
426 status
= cod_get_base_lib(cod_mgr
, &nldr_obj
->base_lib
);
428 cod_get_base_name(cod_mgr
, sz_zl_file
,
432 /* end lazy status checking */
433 nldr_obj
->dsp_mau_size
= pattrs
->dsp_mau_size
;
434 nldr_obj
->dsp_word_size
= pattrs
->dsp_word_size
;
435 nldr_obj
->ldr_fxns
= ldr_fxns
;
436 if (!(nldr_obj
->ldr_fxns
.init_fxn()))
442 /* Create the DCD Manager */
444 status
= dcd_create_manager(NULL
, &nldr_obj
->dcd_mgr
);
446 /* Get dynamic loading memory sections from base lib */
449 nldr_obj
->ldr_fxns
.get_sect_fxn(nldr_obj
->base_lib
,
450 DYNMEMSECT
, &ul_addr
,
454 kzalloc(ul_len
* nldr_obj
->dsp_mau_size
,
459 /* Ok to not have dynamic loading memory */
462 dev_dbg(bridge
, "%s: failed - no dynamic loading mem "
463 "segments: 0x%x\n", __func__
, status
);
466 if (!status
&& ul_len
> 0) {
467 /* Read section containing dynamic load mem segments */
469 nldr_obj
->ldr_fxns
.read_sect_fxn(nldr_obj
->base_lib
,
470 DYNMEMSECT
, psz_coff_buf
,
473 if (!status
&& ul_len
> 0) {
474 /* Parse memory segment data */
475 dload_segs
= (u16
) (*((u32
*) psz_coff_buf
));
476 if (dload_segs
> MAXMEMSEGS
)
479 /* Parse dynamic load memory segments */
480 if (!status
&& dload_segs
> 0) {
481 rmm_segs
= kzalloc(sizeof(struct rmm_segment
) * dload_segs
,
483 nldr_obj
->seg_table
=
484 kzalloc(sizeof(u32
) * dload_segs
, GFP_KERNEL
);
485 if (rmm_segs
== NULL
|| nldr_obj
->seg_table
== NULL
) {
488 nldr_obj
->dload_segs
= dload_segs
;
489 mem_info_obj
= (struct mem_seg_info
*)(psz_coff_buf
+
491 for (i
= 0; i
< dload_segs
; i
++) {
492 rmm_segs
[i
].base
= (mem_info_obj
+ i
)->base
;
493 rmm_segs
[i
].length
= (mem_info_obj
+ i
)->len
;
494 rmm_segs
[i
].space
= 0;
495 nldr_obj
->seg_table
[i
] =
496 (mem_info_obj
+ i
)->type
;
498 "(proc) DLL MEMSEGMENT: %d, "
499 "Base: 0x%x, Length: 0x%x\n", i
,
500 rmm_segs
[i
].base
, rmm_segs
[i
].length
);
504 /* Create Remote memory manager */
506 status
= rmm_create(&nldr_obj
->rmm
, rmm_segs
, dload_segs
);
509 /* set the alloc, free, write functions for loader */
510 nldr_obj
->ldr_fxns
.get_attrs_fxn(nldr_obj
->dbll
, &save_attrs
);
511 new_attrs
= save_attrs
;
512 new_attrs
.alloc
= (dbll_alloc_fxn
) remote_alloc
;
513 new_attrs
.free
= (dbll_free_fxn
) remote_free
;
514 new_attrs
.sym_lookup
= (dbll_sym_lookup
) get_symbol_value
;
515 new_attrs
.sym_handle
= nldr_obj
;
516 new_attrs
.write
= (dbll_write_fxn
) pattrs
->write
;
517 nldr_obj
->ovly_fxn
= pattrs
->ovly
;
518 nldr_obj
->write_fxn
= pattrs
->write
;
519 nldr_obj
->ldr_attrs
= new_attrs
;
525 /* Get overlay nodes */
528 cod_get_base_name(cod_mgr
, sz_zl_file
, COD_MAXPATHLENGTH
);
530 /* First count number of overlay nodes */
532 dcd_get_objects(nldr_obj
->dcd_mgr
, sz_zl_file
,
533 add_ovly_node
, (void *)nldr_obj
);
534 /* Now build table of overlay nodes */
535 if (!status
&& nldr_obj
->ovly_nodes
> 0) {
536 /* Allocate table for overlay nodes */
537 nldr_obj
->ovly_table
=
538 kzalloc(sizeof(struct ovly_node
) *
539 nldr_obj
->ovly_nodes
, GFP_KERNEL
);
540 /* Put overlay nodes in the table */
541 nldr_obj
->ovly_nid
= 0;
542 status
= dcd_get_objects(nldr_obj
->dcd_mgr
, sz_zl_file
,
547 /* Do a fake reload of the base image to get overlay section info */
548 if (!status
&& nldr_obj
->ovly_nodes
> 0) {
549 save_attrs
.write
= fake_ovly_write
;
550 save_attrs
.log_write
= add_ovly_info
;
551 save_attrs
.log_write_handle
= nldr_obj
;
552 flags
= DBLL_CODE
| DBLL_DATA
| DBLL_SYMB
;
553 status
= nldr_obj
->ldr_fxns
.load_fxn(nldr_obj
->base_lib
, flags
,
554 &save_attrs
, &ul_entry
);
557 *nldr
= (struct nldr_object
*)nldr_obj
;
560 nldr_delete((struct nldr_object
*)nldr_obj
);
564 /* FIXME:Temp. Fix. Must be removed */
569 * ======== nldr_delete ========
571 void nldr_delete(struct nldr_object
*nldr_obj
)
573 struct ovly_sect
*ovly_section
;
574 struct ovly_sect
*next
;
577 nldr_obj
->ldr_fxns
.exit_fxn();
579 rmm_delete(nldr_obj
->rmm
);
581 kfree(nldr_obj
->seg_table
);
583 if (nldr_obj
->dcd_mgr
)
584 dcd_destroy_manager(nldr_obj
->dcd_mgr
);
586 /* Free overlay node information */
587 if (nldr_obj
->ovly_table
) {
588 for (i
= 0; i
< nldr_obj
->ovly_nodes
; i
++) {
590 nldr_obj
->ovly_table
[i
].create_sects_list
;
591 while (ovly_section
) {
592 next
= ovly_section
->next_sect
;
597 nldr_obj
->ovly_table
[i
].delete_sects_list
;
598 while (ovly_section
) {
599 next
= ovly_section
->next_sect
;
604 nldr_obj
->ovly_table
[i
].execute_sects_list
;
605 while (ovly_section
) {
606 next
= ovly_section
->next_sect
;
610 ovly_section
= nldr_obj
->ovly_table
[i
].other_sects_list
;
611 while (ovly_section
) {
612 next
= ovly_section
->next_sect
;
617 kfree(nldr_obj
->ovly_table
);
623 * ======== nldr_get_fxn_addr ========
625 int nldr_get_fxn_addr(struct nldr_nodeobject
*nldr_node_obj
,
626 char *str_fxn
, u32
* addr
)
628 struct dbll_sym_val
*dbll_sym
;
629 struct nldr_object
*nldr_obj
;
631 bool status1
= false;
633 struct lib_node root
= { NULL
, 0, NULL
};
635 nldr_obj
= nldr_node_obj
->nldr_obj
;
636 /* Called from node_create(), node_delete(), or node_run(). */
637 if (nldr_node_obj
->dynamic
&& *nldr_node_obj
->phase_split
) {
638 switch (nldr_node_obj
->phase
) {
640 root
= nldr_node_obj
->create_lib
;
643 root
= nldr_node_obj
->execute_lib
;
646 root
= nldr_node_obj
->delete_lib
;
652 /* for Overlay nodes or non-split Dynamic nodes */
653 root
= nldr_node_obj
->root
;
656 nldr_obj
->ldr_fxns
.get_c_addr_fxn(root
.lib
, str_fxn
, &dbll_sym
);
659 nldr_obj
->ldr_fxns
.get_addr_fxn(root
.lib
, str_fxn
,
662 /* If symbol not found, check dependent libraries */
664 for (i
= 0; i
< root
.dep_libs
; i
++) {
666 nldr_obj
->ldr_fxns
.get_addr_fxn(root
.dep_libs_tree
672 get_c_addr_fxn(root
.dep_libs_tree
[i
].lib
,
681 /* Check persistent libraries */
683 for (i
= 0; i
< nldr_node_obj
->pers_libs
; i
++) {
686 get_addr_fxn(nldr_node_obj
->pers_lib_table
[i
].lib
,
691 get_c_addr_fxn(nldr_node_obj
->pers_lib_table
692 [i
].lib
, str_fxn
, &dbll_sym
);
702 *addr
= dbll_sym
->value
;
710 * ======== nldr_get_rmm_manager ========
711 * Given a NLDR object, retrieve RMM Manager Handle
/* Accessor: stores nldr->rmm into *rmm_mgr. NOTE(review): the status
 * declaration, error path and return (original lines 715-724) are
 * missing from this extraction. */
713 int nldr_get_rmm_manager(struct nldr_object
*nldr
,
714 struct rmm_target_obj
**rmm_mgr
)
717 struct nldr_object
*nldr_obj
= nldr
;
720 *rmm_mgr
= nldr_obj
->rmm
;
730 * ======== nldr_load ========
732 int nldr_load(struct nldr_nodeobject
*nldr_node_obj
,
733 enum nldr_phase phase
)
735 struct nldr_object
*nldr_obj
;
736 struct dsp_uuid lib_uuid
;
739 nldr_obj
= nldr_node_obj
->nldr_obj
;
741 if (nldr_node_obj
->dynamic
) {
742 nldr_node_obj
->phase
= phase
;
744 lib_uuid
= nldr_node_obj
->uuid
;
746 /* At this point, we may not know if node is split into
747 * different libraries. So we'll go ahead and load the
748 * library, and then save the pointer to the appropriate
749 * location after we know. */
752 load_lib(nldr_node_obj
, &nldr_node_obj
->root
, lib_uuid
,
753 false, nldr_node_obj
->lib_path
, phase
, 0);
756 if (*nldr_node_obj
->phase_split
) {
759 nldr_node_obj
->create_lib
=
764 nldr_node_obj
->execute_lib
=
769 nldr_node_obj
->delete_lib
=
779 if (nldr_node_obj
->overlay
)
780 status
= load_ovly(nldr_node_obj
, phase
);
788 * ======== nldr_unload ========
790 int nldr_unload(struct nldr_nodeobject
*nldr_node_obj
,
791 enum nldr_phase phase
)
794 struct lib_node
*root_lib
= NULL
;
797 if (nldr_node_obj
!= NULL
) {
798 if (nldr_node_obj
->dynamic
) {
799 if (*nldr_node_obj
->phase_split
) {
802 root_lib
= &nldr_node_obj
->create_lib
;
805 root_lib
= &nldr_node_obj
->execute_lib
;
808 root_lib
= &nldr_node_obj
->delete_lib
;
809 /* Unload persistent libraries */
811 i
< nldr_node_obj
->pers_libs
;
813 unload_lib(nldr_node_obj
,
817 nldr_node_obj
->pers_libs
= 0;
823 /* Unload main library */
824 root_lib
= &nldr_node_obj
->root
;
827 unload_lib(nldr_node_obj
, root_lib
);
829 if (nldr_node_obj
->overlay
)
830 unload_ovly(nldr_node_obj
, phase
);
838 * ======== add_ovly_info ========
840 static int add_ovly_info(void *handle
, struct dbll_sect_info
*sect_info
,
844 char *sect_name
= (char *)sect_info
->name
;
845 bool sect_exists
= false;
849 struct nldr_object
*nldr_obj
= (struct nldr_object
*)handle
;
852 /* Is this an overlay section (load address != run address)? */
853 if (sect_info
->sect_load_addr
== sect_info
->sect_run_addr
)
856 /* Find the node it belongs to */
857 for (i
= 0; i
< nldr_obj
->ovly_nodes
; i
++) {
858 node_name
= nldr_obj
->ovly_table
[i
].node_name
;
859 if (strncmp(node_name
, sect_name
+ 1, strlen(node_name
)) == 0) {
864 if (!(i
< nldr_obj
->ovly_nodes
))
867 /* Determine which phase this section belongs to */
868 for (pch
= sect_name
+ 1; *pch
&& *pch
!= seps
; pch
++)
872 pch
++; /* Skip over the ':' */
873 if (strncmp(pch
, PCREATE
, strlen(PCREATE
)) == 0) {
875 add_ovly_sect(nldr_obj
,
877 ovly_table
[i
].create_sects_list
,
878 sect_info
, §_exists
, addr
, bytes
);
879 if (!status
&& !sect_exists
)
880 nldr_obj
->ovly_table
[i
].create_sects
++;
882 } else if (strncmp(pch
, PDELETE
, strlen(PDELETE
)) == 0) {
884 add_ovly_sect(nldr_obj
,
886 ovly_table
[i
].delete_sects_list
,
887 sect_info
, §_exists
, addr
, bytes
);
888 if (!status
&& !sect_exists
)
889 nldr_obj
->ovly_table
[i
].delete_sects
++;
891 } else if (strncmp(pch
, PEXECUTE
, strlen(PEXECUTE
)) == 0) {
893 add_ovly_sect(nldr_obj
,
895 ovly_table
[i
].execute_sects_list
,
896 sect_info
, §_exists
, addr
, bytes
);
897 if (!status
&& !sect_exists
)
898 nldr_obj
->ovly_table
[i
].execute_sects
++;
901 /* Put in "other" sections */
903 add_ovly_sect(nldr_obj
,
905 ovly_table
[i
].other_sects_list
,
906 sect_info
, §_exists
, addr
, bytes
);
907 if (!status
&& !sect_exists
)
908 nldr_obj
->ovly_table
[i
].other_sects
++;
917 * ======== add_ovly_node =========
918 * Callback function passed to dcd_get_objects.
920 static int add_ovly_node(struct dsp_uuid
*uuid_obj
,
921 enum dsp_dcdobjtype obj_type
, void *handle
)
923 struct nldr_object
*nldr_obj
= (struct nldr_object
*)handle
;
924 char *node_name
= NULL
;
927 struct dcd_genericobj obj_def
;
930 if (obj_type
!= DSP_DCDNODETYPE
)
934 dcd_get_object_def(nldr_obj
->dcd_mgr
, uuid_obj
, obj_type
,
939 /* If overlay node, add to the list */
940 if (obj_def
.obj_data
.node_obj
.load_type
== NLDR_OVLYLOAD
) {
941 if (nldr_obj
->ovly_table
== NULL
) {
942 nldr_obj
->ovly_nodes
++;
944 /* Add node to table */
945 nldr_obj
->ovly_table
[nldr_obj
->ovly_nid
].uuid
=
948 strlen(obj_def
.obj_data
.node_obj
.ndb_props
.ac_name
);
949 node_name
= obj_def
.obj_data
.node_obj
.ndb_props
.ac_name
;
950 pbuf
= kzalloc(len
+ 1, GFP_KERNEL
);
954 strncpy(pbuf
, node_name
, len
);
955 nldr_obj
->ovly_table
[nldr_obj
->ovly_nid
].
957 nldr_obj
->ovly_nid
++;
961 /* These were allocated in dcd_get_object_def */
962 kfree(obj_def
.obj_data
.node_obj
.str_create_phase_fxn
);
964 kfree(obj_def
.obj_data
.node_obj
.str_execute_phase_fxn
);
966 kfree(obj_def
.obj_data
.node_obj
.str_delete_phase_fxn
);
968 kfree(obj_def
.obj_data
.node_obj
.str_i_alg_name
);
975 * ======== add_ovly_sect ========
977 static int add_ovly_sect(struct nldr_object
*nldr_obj
,
978 struct ovly_sect
**lst
,
979 struct dbll_sect_info
*sect_inf
,
980 bool *exists
, u32 addr
, u32 bytes
)
982 struct ovly_sect
*new_sect
= NULL
;
983 struct ovly_sect
*last_sect
;
984 struct ovly_sect
*ovly_section
;
987 ovly_section
= last_sect
= *lst
;
989 while (ovly_section
) {
991 * Make sure section has not already been added. Multiple
992 * 'write' calls may be made to load the section.
994 if (ovly_section
->sect_load_addr
== addr
) {
999 last_sect
= ovly_section
;
1000 ovly_section
= ovly_section
->next_sect
;
1003 if (!ovly_section
) {
1005 new_sect
= kzalloc(sizeof(struct ovly_sect
), GFP_KERNEL
);
1006 if (new_sect
== NULL
) {
1009 new_sect
->sect_load_addr
= addr
;
1010 new_sect
->sect_run_addr
= sect_inf
->sect_run_addr
+
1011 (addr
- sect_inf
->sect_load_addr
);
1012 new_sect
->size
= bytes
;
1013 new_sect
->page
= sect_inf
->type
;
1016 /* Add to the list */
1019 /* First in the list */
1022 last_sect
->next_sect
= new_sect
;
1031 * ======== fake_ovly_write ========
1033 static s32
fake_ovly_write(void *handle
, u32 dsp_address
, void *buf
, u32 bytes
,
1040 * ======== free_sects ========
/* Walk the per-phase overlay-section list and release up to alloc_num
 * sections back to the remote memory manager via rmm_free().
 * NOTE(review): the declaration/increment of loop counter i, the
 * opening/closing braces and any status handling (e.g. original lines
 * 1046-1048, 1053, 1057+) are missing from this extraction. */
1042 static void free_sects(struct nldr_object
*nldr_obj
,
1043 struct ovly_sect
*phase_sects
, u16 alloc_num
)
1045 struct ovly_sect
*ovly_section
= phase_sects
;
1049 while (ovly_section
&& i
< alloc_num
) {
1051 /* segid - page not supported yet */
1052 /* Reserved memory */
/* true => free the reserved region, matching the reserve flag used at
 * allocation time in load_ovly(). */
1054 rmm_free(nldr_obj
->rmm
, 0, ovly_section
->sect_run_addr
,
1055 ovly_section
->size
, true);
1056 ovly_section
= ovly_section
->next_sect
;
1062 * ======== get_symbol_value ========
1063 * Find symbol in library's base image. If not there, check dependent
1066 static bool get_symbol_value(void *handle
, void *parg
, void *rmm_handle
,
1067 char *sym_name
, struct dbll_sym_val
**sym
)
1069 struct nldr_object
*nldr_obj
= (struct nldr_object
*)handle
;
1070 struct nldr_nodeobject
*nldr_node_obj
=
1071 (struct nldr_nodeobject
*)rmm_handle
;
1072 struct lib_node
*root
= (struct lib_node
*)parg
;
1074 bool status
= false;
1076 /* check the base image */
1077 status
= nldr_obj
->ldr_fxns
.get_addr_fxn(nldr_obj
->base_lib
,
1081 nldr_obj
->ldr_fxns
.get_c_addr_fxn(nldr_obj
->base_lib
,
1085 * Check in root lib itself. If the library consists of
1086 * multiple object files linked together, some symbols in the
1087 * library may need to be resolved.
1090 status
= nldr_obj
->ldr_fxns
.get_addr_fxn(root
->lib
, sym_name
,
1094 nldr_obj
->ldr_fxns
.get_c_addr_fxn(root
->lib
,
1100 * Check in root lib's dependent libraries, but not dependent
1101 * libraries' dependents.
1104 for (i
= 0; i
< root
->dep_libs
; i
++) {
1106 nldr_obj
->ldr_fxns
.get_addr_fxn(root
->
1113 get_c_addr_fxn(root
->dep_libs_tree
[i
].lib
,
1123 * Check in persistent libraries
1126 for (i
= 0; i
< nldr_node_obj
->pers_libs
; i
++) {
1129 get_addr_fxn(nldr_node_obj
->pers_lib_table
[i
].lib
,
1132 status
= nldr_obj
->ldr_fxns
.get_c_addr_fxn
1133 (nldr_node_obj
->pers_lib_table
[i
].lib
,
1147 * ======== load_lib ========
1148 * Recursively load library and all its dependent libraries. The library
1149 * we're loading is specified by a uuid.
1151 static int load_lib(struct nldr_nodeobject
*nldr_node_obj
,
1152 struct lib_node
*root
, struct dsp_uuid uuid
,
1154 struct dbll_library_obj
**lib_path
,
1155 enum nldr_phase phase
, u16 depth
)
1157 struct nldr_object
*nldr_obj
= nldr_node_obj
->nldr_obj
;
1158 u16 nd_libs
= 0; /* Number of dependent libraries */
1159 u16 np_libs
= 0; /* Number of persistent libraries */
1160 u16 nd_libs_loaded
= 0; /* Number of dep. libraries loaded */
1163 u32 dw_buf_size
= NLDR_MAXPATHLENGTH
;
1164 dbll_flags flags
= DBLL_SYMB
| DBLL_CODE
| DBLL_DATA
| DBLL_DYNAMIC
;
1165 struct dbll_attrs new_attrs
;
1166 char *psz_file_name
= NULL
;
1167 struct dsp_uuid
*dep_lib_uui_ds
= NULL
;
1168 bool *persistent_dep_libs
= NULL
;
1170 bool lib_status
= false;
1171 struct lib_node
*dep_lib
;
1173 if (depth
> MAXDEPTH
) {
1177 /* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */
1178 psz_file_name
= kzalloc(DBLL_MAXPATHLENGTH
, GFP_KERNEL
);
1179 if (psz_file_name
== NULL
)
1183 /* Get the name of the library */
1186 dcd_get_library_name(nldr_node_obj
->nldr_obj
->
1187 dcd_mgr
, &uuid
, psz_file_name
,
1188 &dw_buf_size
, phase
,
1189 nldr_node_obj
->phase_split
);
1191 /* Dependent libraries are registered with a phase */
1193 dcd_get_library_name(nldr_node_obj
->nldr_obj
->
1194 dcd_mgr
, &uuid
, psz_file_name
,
1195 &dw_buf_size
, NLDR_NOPHASE
,
1200 /* Open the library, don't load symbols */
1202 nldr_obj
->ldr_fxns
.open_fxn(nldr_obj
->dbll
, psz_file_name
,
1203 DBLL_NOLOAD
, &root
->lib
);
1205 /* Done with file name */
1206 kfree(psz_file_name
);
1208 /* Check to see if library not already loaded */
1209 if (!status
&& root_prstnt
) {
1211 find_in_persistent_lib_array(nldr_node_obj
, root
->lib
);
1214 nldr_obj
->ldr_fxns
.close_fxn(root
->lib
);
1219 /* Check for circular dependencies. */
1220 for (i
= 0; i
< depth
; i
++) {
1221 if (root
->lib
== lib_path
[i
]) {
1222 /* This condition could be checked by a
1223 * tool at build time. */
1229 /* Add library to current path in dependency tree */
1230 lib_path
[depth
] = root
->lib
;
1232 /* Get number of dependent libraries */
1234 dcd_get_num_dep_libs(nldr_node_obj
->nldr_obj
->dcd_mgr
,
1235 &uuid
, &nd_libs
, &np_libs
, phase
);
1238 if (!(*nldr_node_obj
->phase_split
))
1241 /* nd_libs = #of dependent libraries */
1242 root
->dep_libs
= nd_libs
- np_libs
;
1244 dep_lib_uui_ds
= kzalloc(sizeof(struct dsp_uuid
) *
1245 nd_libs
, GFP_KERNEL
);
1246 persistent_dep_libs
=
1247 kzalloc(sizeof(bool) * nd_libs
, GFP_KERNEL
);
1248 if (!dep_lib_uui_ds
|| !persistent_dep_libs
)
1251 if (root
->dep_libs
> 0) {
1252 /* Allocate arrays for dependent lib UUIDs,
1254 root
->dep_libs_tree
= kzalloc
1255 (sizeof(struct lib_node
) *
1256 (root
->dep_libs
), GFP_KERNEL
);
1257 if (!(root
->dep_libs_tree
))
1263 /* Get the dependent library UUIDs */
1265 dcd_get_dep_libs(nldr_node_obj
->
1266 nldr_obj
->dcd_mgr
, &uuid
,
1267 nd_libs
, dep_lib_uui_ds
,
1268 persistent_dep_libs
,
1275 * Recursively load dependent libraries.
1278 for (i
= 0; i
< nd_libs
; i
++) {
1279 /* If root library is NOT persistent, and dep library
1280 * is, then record it. If root library IS persistent,
1281 * the deplib is already included */
1282 if (!root_prstnt
&& persistent_dep_libs
[i
] &&
1283 *nldr_node_obj
->phase_split
) {
1284 if ((nldr_node_obj
->pers_libs
) >= MAXLIBS
) {
1289 /* Allocate library outside of phase */
1291 &nldr_node_obj
->pers_lib_table
1292 [nldr_node_obj
->pers_libs
];
1295 persistent_dep_libs
[i
] = true;
1297 /* Allocate library within phase */
1298 dep_lib
= &root
->dep_libs_tree
[nd_libs_loaded
];
1301 status
= load_lib(nldr_node_obj
, dep_lib
,
1303 persistent_dep_libs
[i
], lib_path
,
1307 if ((status
!= 0) &&
1308 !root_prstnt
&& persistent_dep_libs
[i
] &&
1309 *nldr_node_obj
->phase_split
) {
1310 (nldr_node_obj
->pers_libs
)++;
1312 if (!persistent_dep_libs
[i
] ||
1313 !(*nldr_node_obj
->phase_split
)) {
1323 /* Now we can load the root library */
1325 new_attrs
= nldr_obj
->ldr_attrs
;
1326 new_attrs
.sym_arg
= root
;
1327 new_attrs
.rmm_handle
= nldr_node_obj
;
1328 new_attrs
.input_params
= nldr_node_obj
->priv_ref
;
1329 new_attrs
.base_image
= false;
1332 nldr_obj
->ldr_fxns
.load_fxn(root
->lib
, flags
, &new_attrs
,
1337 * In case of failure, unload any dependent libraries that
1338 * were loaded, and close the root library.
1339 * (Persistent libraries are unloaded from the very top)
1342 if (phase
!= NLDR_EXECUTE
) {
1343 for (i
= 0; i
< nldr_node_obj
->pers_libs
; i
++)
1344 unload_lib(nldr_node_obj
,
1345 &nldr_node_obj
->pers_lib_table
[i
]);
1347 nldr_node_obj
->pers_libs
= 0;
1349 for (i
= 0; i
< nd_libs_loaded
; i
++)
1350 unload_lib(nldr_node_obj
, &root
->dep_libs_tree
[i
]);
1353 nldr_obj
->ldr_fxns
.close_fxn(root
->lib
);
1357 /* Going up one node in the dependency tree */
1360 kfree(dep_lib_uui_ds
);
1361 dep_lib_uui_ds
= NULL
;
1363 kfree(persistent_dep_libs
);
1364 persistent_dep_libs
= NULL
;
/*
 * NOTE(review): this region is a garbled extraction of the original file.
 * The leading integers embedded in the lines below are the original
 * source line numbers; statements are split across physical lines, and
 * gaps in that embedded numbering show whole source lines are missing.
 * Code tokens are preserved exactly as found; only review comments have
 * been added.  Reconstruct against the full source before compiling.
 */
/*
 * load_ovly() -- bring in the overlay sections an overlay node needs for
 * the given phase.  Visible behavior: locate the node's slot in
 * nldr_obj->ovly_table by UUID, select the phase's section list and
 * reference count (the create arm also covers the shared 'other'
 * sections via other_ref), reserve DSP memory for each section with
 * rmm_alloc(), transfer each section with (*nldr_obj->ovly_fxn)()
 * checking the returned byte count against the section size, and release
 * sections with free_sects().  The switch labels, several locals
 * (i, status, bytes, alloc_num) and the final return are on missing
 * lines -- confirm against the full source.
 */
1370 * ======== load_ovly ========
1372 static int load_ovly(struct nldr_nodeobject
*nldr_node_obj
,
1373 enum nldr_phase phase
)
1375 struct nldr_object
*nldr_obj
= nldr_node_obj
->nldr_obj
;
1376 struct ovly_node
*po_node
= NULL
;
1377 struct ovly_sect
*phase_sects
= NULL
;
1378 struct ovly_sect
*other_sects_list
= NULL
;
1381 u16 other_alloc
= 0;
1382 u16
*ref_count
= NULL
;
1383 u16
*other_ref
= NULL
;
1385 struct ovly_sect
*ovly_section
;
1388 /* Find the node in the table */
1389 for (i
= 0; i
< nldr_obj
->ovly_nodes
; i
++) {
/*
 * NOTE(review): the comparison call wrapping these arguments sits on a
 * missing line -- presumably a UUID-compare helper; confirm.
 */
1391 (&nldr_node_obj
->uuid
, &nldr_obj
->ovly_table
[i
].uuid
)) {
1393 po_node
= &(nldr_obj
->ovly_table
[i
]);
/* Per-phase setup: this arm also tracks the shared 'other' sections. */
1406 ref_count
= &(po_node
->create_ref
);
1407 other_ref
= &(po_node
->other_ref
);
1408 phase_sects
= po_node
->create_sects_list
;
1409 other_sects_list
= po_node
->other_sects_list
;
1413 ref_count
= &(po_node
->execute_ref
);
1414 phase_sects
= po_node
->execute_sects_list
;
1418 ref_count
= &(po_node
->delete_ref
);
1419 phase_sects
= po_node
->delete_sects_list
;
/* ref_count still NULL means no valid phase matched above. */
1426 if (ref_count
== NULL
)
/* Non-zero count: this phase is already loaded; skip the work below. */
1429 if (*ref_count
!= 0)
1432 /* 'Allocate' memory for overlay sections of this phase */
1433 ovly_section
= phase_sects
;
1434 while (ovly_section
) {
1435 /* allocate *//* page not supported yet */
1436 /* reserve *//* align */
1437 status
= rmm_alloc(nldr_obj
->rmm
, 0, ovly_section
->size
, 0,
1438 &(ovly_section
->sect_run_addr
), true);
1440 ovly_section
= ovly_section
->next_sect
;
/* First use of the shared 'other' sections: reserve those too. */
1446 if (other_ref
&& *other_ref
== 0) {
1447 /* 'Allocate' memory for other overlay sections
1450 ovly_section
= other_sects_list
;
1451 while (ovly_section
) {
1452 /* page not supported *//* align */
1455 rmm_alloc(nldr_obj
->rmm
, 0,
1456 ovly_section
->size
, 0,
1457 &(ovly_section
->sect_run_addr
),
1460 ovly_section
= ovly_section
->next_sect
;
1468 if (*ref_count
== 0) {
1470 /* Load sections for this phase */
1471 ovly_section
= phase_sects
;
1472 while (ovly_section
&& !status
) {
/*
 * NOTE(review): ovly_fxn's full argument list and the assignment of its
 * byte-count result to 'bytes' are on missing lines.
 */
1474 (*nldr_obj
->ovly_fxn
) (nldr_node_obj
->
1481 ovly_section
->page
);
/* A short transfer is treated as failure. */
1482 if (bytes
!= ovly_section
->size
)
1485 ovly_section
= ovly_section
->next_sect
;
1489 if (other_ref
&& *other_ref
== 0) {
1491 /* Load other sections (create phase) */
1492 ovly_section
= other_sects_list
;
1493 while (ovly_section
&& !status
) {
1495 (*nldr_obj
->ovly_fxn
) (nldr_node_obj
->
1502 ovly_section
->page
);
1503 if (bytes
!= ovly_section
->size
)
1506 ovly_section
= ovly_section
->next_sect
;
/*
 * NOTE(review): appears to be the failure-cleanup path -- the guarding
 * condition is on missing lines; confirm against the full source.
 */
1511 /* 'Deallocate' memory */
1512 free_sects(nldr_obj
, phase_sects
, alloc_num
);
1513 free_sects(nldr_obj
, other_sects_list
, other_alloc
);
/* On success, bump the phase reference count (body on missing lines). */
1516 if (!status
&& (ref_count
!= NULL
)) {
/*
 * NOTE(review): garbled extraction -- the embedded integers are the
 * original file's line numbers, and gaps in them mark missing source
 * lines.  Code tokens preserved exactly; only comments added.
 */
/*
 * remote_alloc() -- memory-allocation callback handed to the dynamic
 * loader.  Visible behavior: round 'size' up to DSP words, widen 'align'
 * to a multiple of GEM_CACHE_LINE_SIZE via lcm(), derive the preferred
 * segment id from the node's phase (seg_id[mem_phase_bit]) and its
 * code_data_flag_mask, then attempt rmm_alloc() on the preferred
 * segment, on any type-compatible internal/external segment, and
 * finally -- only when loading there is not a hard requirement
 * (mem_load_req) -- on any segment at all.  'status' starts at -ENOMEM
 * and is overwritten by successful rmm_alloc() calls; the final return
 * is on missing lines.
 */
1527 * ======== remote_alloc ========
1529 static int remote_alloc(void **ref
, u16 mem_sect
, u32 size
,
1530 u32 align
, u32
*dsp_address
,
1531 s32 segmnt_id
, s32 req
,
/* 'ref' carries the node object; dsp_address doubles as an rmm_addr. */
1534 struct nldr_nodeobject
*hnode
= (struct nldr_nodeobject
*)ref
;
1535 struct nldr_object
*nldr_obj
;
1536 struct rmm_target_obj
*rmm
;
1537 u16 mem_phase_bit
= MAXFLAGS
;
1542 struct rmm_addr
*rmm_addr_obj
= (struct rmm_addr
*)dsp_address
;
1543 bool mem_load_req
= false;
1544 int status
= -ENOMEM
; /* Set to fail */
1545 nldr_obj
= hnode
->nldr_obj
;
1546 rmm
= nldr_obj
->rmm
;
1547 /* Convert size to DSP words */
/* NOTE(review): the 'word_size =' left-hand side is on a missing line. */
1549 (size
+ nldr_obj
->dsp_word_size
-
1550 1) / nldr_obj
->dsp_word_size
;
1551 /* Modify memory 'align' to account for DSP cache line size */
1552 align
= lcm(GEM_CACHE_LINE_SIZE
, align
);
1553 dev_dbg(bridge
, "%s: memory align to 0x%x\n", __func__
, align
);
/* A caller-forced segment id takes precedence (rest of branch missing). */
1554 if (segmnt_id
!= -1) {
1555 rmm_addr_obj
->segid
= segmnt_id
;
/* Select the data-flag bit for this node's phase (case labels missing). */
1559 switch (hnode
->phase
) {
1561 mem_phase_bit
= CREATEDATAFLAGBIT
;
1564 mem_phase_bit
= DELETEDATAFLAGBIT
;
1567 mem_phase_bit
= EXECUTEDATAFLAGBIT
;
/* NOTE(review): the code-section adjustment body is on a missing line. */
1572 if (mem_sect
== DBLL_CODE
)
1575 if (mem_phase_bit
< MAXFLAGS
)
1576 segid
= hnode
->seg_id
[mem_phase_bit
];
1578 /* Determine if there is a memory loading requirement */
1579 if ((hnode
->code_data_flag_mask
>> mem_phase_bit
) & 0x1)
1580 mem_load_req
= true;
1583 mem_sect_type
= (mem_sect
== DBLL_CODE
) ? DYNM_CODE
: DYNM_DATA
;
1585 /* Find an appropriate segment based on mem_sect */
1586 if (segid
== NULLID
) {
1587 /* No memory requirements or preferences */
1590 if (segid
<= MAXSEGID
) {
1591 /* Attempt to allocate from segid first. */
1592 rmm_addr_obj
->segid
= segid
;
1594 rmm_alloc(rmm
, segid
, word_size
, align
, dsp_address
, false);
1596 dev_dbg(bridge
, "%s: Unable allocate from segment %d\n",
1600 /* segid > MAXSEGID ==> Internal or external memory */
1601 /* Check for any internal or external memory segment,
1602 * depending on segid. */
1603 mem_sect_type
|= segid
== MEMINTERNALID
?
1604 DYNM_INTERNAL
: DYNM_EXTERNAL
;
/* Scan the loader's segment table for a type-compatible segment. */
1605 for (i
= 0; i
< nldr_obj
->dload_segs
; i
++) {
1606 if ((nldr_obj
->seg_table
[i
] & mem_sect_type
) !=
1610 status
= rmm_alloc(rmm
, i
, word_size
, align
,
1611 dsp_address
, false);
1613 /* Save segid for freeing later */
1614 rmm_addr_obj
->segid
= i
;
1620 /* Haven't found memory yet, attempt to find any segment that works */
1621 if (status
== -ENOMEM
&& !mem_load_req
) {
1622 dev_dbg(bridge
, "%s: Preferred segment unavailable, trying "
1623 "another\n", __func__
);
1624 for (i
= 0; i
< nldr_obj
->dload_segs
; i
++) {
1625 /* All bits of mem_sect_type must be set */
1626 if ((nldr_obj
->seg_table
[i
] & mem_sect_type
) !=
1630 status
= rmm_alloc(rmm
, i
, word_size
, align
,
1631 dsp_address
, false);
1634 rmm_addr_obj
->segid
= i
;
/*
 * NOTE(review): garbled extraction -- the embedded integers are the
 * original file's line numbers, and gaps in them mark missing source
 * lines.  Code tokens preserved exactly; only comments added.
 */
/*
 * remote_free() -- memory-free callback handed to the dynamic loader.
 * Rounds 'size' up to DSP words and hands the block back to the remote
 * memory manager via rmm_free().  'status' starts at -ENOMEM; the
 * success assignment and the return statement are on missing lines.
 */
1643 static int remote_free(void **ref
, u16 space
, u32 dsp_address
,
1644 u32 size
, bool reserve
)
/* Here 'ref' carries the nldr_object itself (contrast remote_alloc). */
1646 struct nldr_object
*nldr_obj
= (struct nldr_object
*)ref
;
1647 struct rmm_target_obj
*rmm
;
1649 int status
= -ENOMEM
; /* Set to fail */
1651 rmm
= nldr_obj
->rmm
;
1653 /* Convert size to DSP words */
/* NOTE(review): the 'word_size =' left-hand side is on a missing line. */
1655 (size
+ nldr_obj
->dsp_word_size
-
1656 1) / nldr_obj
->dsp_word_size
;
1658 if (rmm_free(rmm
, space
, dsp_address
, word_size
, reserve
))
/*
 * NOTE(review): garbled extraction -- the embedded integers are the
 * original file's line numbers, and gaps in them mark missing source
 * lines.  Code tokens preserved exactly; only comments added.
 */
/*
 * unload_lib() -- recursively unload a library tree.  Depth-first
 * unloads every entry in root->dep_libs_tree, then unloads and closes
 * the root library through the loader function table (ldr_fxns) and
 * frees the dependent-library array.  The declaration of 'i' and the
 * function's closing lines are missing from this extraction.
 */
1665 * ======== unload_lib ========
1667 static void unload_lib(struct nldr_nodeobject
*nldr_node_obj
,
1668 struct lib_node
*root
)
1670 struct dbll_attrs new_attrs
;
1671 struct nldr_object
*nldr_obj
= nldr_node_obj
->nldr_obj
;
1675 /* Unload dependent libraries */
1676 for (i
= 0; i
< root
->dep_libs
; i
++)
1677 unload_lib(nldr_node_obj
, &root
->dep_libs_tree
[i
]);
/* Rebuild loader attrs (cf. the load path earlier in this file). */
1681 new_attrs
= nldr_obj
->ldr_attrs
;
1682 new_attrs
.rmm_handle
= nldr_obj
->rmm
;
1683 new_attrs
.input_params
= nldr_node_obj
->priv_ref
;
1684 new_attrs
.base_image
= false;
1685 new_attrs
.sym_arg
= root
;
1688 /* Unload the root library */
1689 nldr_obj
->ldr_fxns
.unload_fxn(root
->lib
, &new_attrs
);
1690 nldr_obj
->ldr_fxns
.close_fxn(root
->lib
);
1693 /* Free dependent library list */
1694 kfree(root
->dep_libs_tree
);
/* Clear the pointer so a second unload cannot double-free the array. */
1695 root
->dep_libs_tree
= NULL
;
/*
 * NOTE(review): garbled extraction -- the embedded integers are the
 * original file's line numbers, and gaps in them mark missing source
 * lines.  Code tokens preserved exactly; only comments added.
 */
/*
 * unload_ovly() -- drop one phase's reference on an overlay node and,
 * when the count reaches zero, release that phase's overlay sections.
 * Finds the node by UUID in nldr_obj->ovly_table, selects the per-phase
 * ref count / section list / section count, and frees via free_sects().
 * The delete arm additionally releases the shared 'other' sections.
 * The switch labels, the decrement itself, and the declarations of 'i'
 * and 'alloc_num' are on missing lines.
 */
1699 * ======== unload_ovly ========
1701 static void unload_ovly(struct nldr_nodeobject
*nldr_node_obj
,
1702 enum nldr_phase phase
)
1704 struct nldr_object
*nldr_obj
= nldr_node_obj
->nldr_obj
;
1705 struct ovly_node
*po_node
= NULL
;
1706 struct ovly_sect
*phase_sects
= NULL
;
1707 struct ovly_sect
*other_sects_list
= NULL
;
1710 u16 other_alloc
= 0;
1711 u16
*ref_count
= NULL
;
1712 u16
*other_ref
= NULL
;
1714 /* Find the node in the table */
1715 for (i
= 0; i
< nldr_obj
->ovly_nodes
; i
++) {
/* NOTE(review): the UUID-compare call name is on a missing line. */
1717 (&nldr_node_obj
->uuid
, &nldr_obj
->ovly_table
[i
].uuid
)) {
1719 po_node
= &(nldr_obj
->ovly_table
[i
]);
1726 /* TODO: Should we print warning here? */
1731 ref_count
= &(po_node
->create_ref
);
1732 phase_sects
= po_node
->create_sects_list
;
1733 alloc_num
= po_node
->create_sects
;
1736 ref_count
= &(po_node
->execute_ref
);
1737 phase_sects
= po_node
->execute_sects_list
;
1738 alloc_num
= po_node
->execute_sects
;
1741 ref_count
= &(po_node
->delete_ref
);
1742 other_ref
= &(po_node
->other_ref
);
1743 phase_sects
= po_node
->delete_sects_list
;
1744 /* 'Other' overlay sections are unloaded in the delete phase */
1745 other_sects_list
= po_node
->other_sects_list
;
1746 alloc_num
= po_node
->delete_sects
;
1747 other_alloc
= po_node
->other_sects
;
/* The decrement happens on missing lines inside this guard. */
1752 if (ref_count
&& (*ref_count
> 0)) {
/* Last reference for this phase gone: release its sections. */
1759 if (ref_count
&& *ref_count
== 0) {
1760 /* 'Deallocate' memory */
1761 free_sects(nldr_obj
, phase_sects
, alloc_num
);
1763 if (other_ref
&& *other_ref
== 0)
1764 free_sects(nldr_obj
, other_sects_list
, other_alloc
);
/*
 * NOTE(review): garbled extraction -- the embedded integers are the
 * original file's line numbers, and gaps in them mark missing source
 * lines.  Code tokens preserved exactly; only comments added.
 */
/*
 * find_in_persistent_lib_array() -- linear scan of the node's persistent
 * library table (pers_lib_table, 'pers_libs' valid entries) for a
 * matching dbll library handle.  The matched-case body (presumably
 * 'return true') and the final 'return false' are on missing lines --
 * confirm against the full source.
 */
1768 * ======== find_in_persistent_lib_array ========
1770 static bool find_in_persistent_lib_array(struct nldr_nodeobject
*nldr_node_obj
,
1771 struct dbll_library_obj
*lib
)
1775 for (i
= 0; i
< nldr_node_obj
->pers_libs
; i
++) {
1776 if (lib
== nldr_node_obj
->pers_lib_table
[i
].lib
)
1784 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
1786 * nldr_find_addr() - Find the closest symbol to the given address based on
1787 * dynamic node object.
1789 * @nldr_node: Dynamic node object
1790 * @sym_addr: Given address to find the dsp symbol
1791 * @offset_range: offset range to look for dsp symbol
1792 * @offset_output: Symbol Output address
1793 * @sym_name: String with the dsp symbol
1795 * This function finds the node library for a given address and
1796 * retrieves the dsp symbol by calling dbll_find_dsp_symbol.
1798 int nldr_find_addr(struct nldr_nodeobject
*nldr_node
, u32 sym_addr
,
1799 u32 offset_range
, void *offset_output
, char *sym_name
)
1802 bool status1
= false;
1804 struct lib_node root
= { NULL
, 0, NULL
};
1805 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__
, (u32
) nldr_node
,
1806 sym_addr
, offset_range
, (u32
) offset_output
, sym_name
);
1808 if (nldr_node
->dynamic
&& *nldr_node
->phase_split
) {
1809 switch (nldr_node
->phase
) {
1811 root
= nldr_node
->create_lib
;
1814 root
= nldr_node
->execute_lib
;
1817 root
= nldr_node
->delete_lib
;
1823 /* for Overlay nodes or non-split Dynamic nodes */
1824 root
= nldr_node
->root
;
1827 status1
= dbll_find_dsp_symbol(root
.lib
, sym_addr
,
1828 offset_range
, offset_output
, sym_name
);
1830 /* If symbol not found, check dependent libraries */
1832 for (i
= 0; i
< root
.dep_libs
; i
++) {
1833 status1
= dbll_find_dsp_symbol(
1834 root
.dep_libs_tree
[i
].lib
, sym_addr
,
1835 offset_range
, offset_output
, sym_name
);
1840 /* Check persistent libraries */
1842 for (i
= 0; i
< nldr_node
->pers_libs
; i
++) {
1843 status1
= dbll_find_dsp_symbol(
1844 nldr_node
->pers_lib_table
[i
].lib
, sym_addr
,
1845 offset_range
, offset_output
, sym_name
);
1852 pr_debug("%s: Address 0x%x not found in range %d.\n",
1853 __func__
, sym_addr
, offset_range
);