staging: tidspbridge: Remove macros used as cast
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / staging / tidspbridge / rmgr / nldr.c
CommitLineData
7d55524d
ORL
1/*
2 * nldr.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP/BIOS Bridge dynamic + overlay Node loader.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
2094f12d
NM
19#include <linux/types.h>
20
7d55524d
ORL
21#include <dspbridge/host_os.h>
22
7d55524d
ORL
23#include <dspbridge/dbdefs.h>
24
25#include <dspbridge/dbc.h>
26
27/* Platform manager */
28#include <dspbridge/cod.h>
29#include <dspbridge/dev.h>
30
31/* Resource manager */
32#include <dspbridge/dbll.h>
33#include <dspbridge/dbdcd.h>
34#include <dspbridge/rmm.h>
35#include <dspbridge/uuidutil.h>
36
37#include <dspbridge/nldr.h>
38
39/* Name of section containing dynamic load mem */
40#define DYNMEMSECT ".dspbridge_mem"
41
42/* Name of section containing dependent library information */
43#define DEPLIBSECT ".dspbridge_deplibs"
44
45/* Max depth of recursion for loading node's dependent libraries */
46#define MAXDEPTH 5
47
48/* Max number of persistent libraries kept by a node */
49#define MAXLIBS 5
50
51/*
52 * Defines for extracting packed dynamic load memory requirements from two
53 * masks.
54 * These defines must match node.cdb and dynm.cdb
55 * Format of data/code mask is:
56 * uuuuuuuu|fueeeeee|fudddddd|fucccccc|
57 * where
58 * u = unused
59 * cccccc = prefered/required dynamic mem segid for create phase data/code
60 * dddddd = prefered/required dynamic mem segid for delete phase data/code
61 * eeeeee = prefered/req. dynamic mem segid for execute phase data/code
62 * f = flag indicating if memory is preferred or required:
63 * f = 1 if required, f = 0 if preferred.
64 *
65 * The 6 bits of the segid are interpreted as follows:
66 *
67 * If the 6th bit (bit 5) is not set, then this specifies a memory segment
68 * between 0 and 31 (a maximum of 32 dynamic loading memory segments).
69 * If the 6th bit (bit 5) is set, segid has the following interpretation:
70 * segid = 32 - Any internal memory segment can be used.
71 * segid = 33 - Any external memory segment can be used.
72 * segid = 63 - Any memory segment can be used (in this case the
73 * required/preferred flag is irrelevant).
74 *
75 */
76/* Maximum allowed dynamic loading memory segments */
77#define MAXMEMSEGS 32
78
79#define MAXSEGID 3 /* Largest possible (real) segid */
80#define MEMINTERNALID 32 /* Segid meaning use internal mem */
81#define MEMEXTERNALID 33 /* Segid meaning use external mem */
82#define NULLID 63 /* Segid meaning no memory req/pref */
83#define FLAGBIT 7 /* 7th bit is pref./req. flag */
84#define SEGMASK 0x3f /* Bits 0 - 5 */
85
86#define CREATEBIT 0 /* Create segid starts at bit 0 */
87#define DELETEBIT 8 /* Delete segid starts at bit 8 */
88#define EXECUTEBIT 16 /* Execute segid starts at bit 16 */
89
90/*
91 * Masks that define memory type. Must match defines in dynm.cdb.
92 */
93#define DYNM_CODE 0x2
94#define DYNM_DATA 0x4
95#define DYNM_CODEDATA (DYNM_CODE | DYNM_DATA)
96#define DYNM_INTERNAL 0x8
97#define DYNM_EXTERNAL 0x10
98
99/*
100 * Defines for packing memory requirement/preference flags for code and
101 * data of each of the node's phases into one mask.
102 * The bit is set if the segid is required for loading code/data of the
103 * given phase. The bit is not set, if the segid is preferred only.
104 *
105 * These defines are also used as indeces into a segid array for the node.
106 * eg node's segid[CREATEDATAFLAGBIT] is the memory segment id that the
107 * create phase data is required or preferred to be loaded into.
108 */
109#define CREATEDATAFLAGBIT 0
110#define CREATECODEFLAGBIT 1
111#define EXECUTEDATAFLAGBIT 2
112#define EXECUTECODEFLAGBIT 3
113#define DELETEDATAFLAGBIT 4
114#define DELETECODEFLAGBIT 5
115#define MAXFLAGS 6
116
117#define IS_INTERNAL(nldr_obj, segid) (((segid) <= MAXSEGID && \
118 nldr_obj->seg_table[(segid)] & DYNM_INTERNAL) || \
119 (segid) == MEMINTERNALID)
120
121#define IS_EXTERNAL(nldr_obj, segid) (((segid) <= MAXSEGID && \
122 nldr_obj->seg_table[(segid)] & DYNM_EXTERNAL) || \
123 (segid) == MEMEXTERNALID)
124
125#define SWAPLONG(x) ((((x) << 24) & 0xFF000000) | (((x) << 8) & 0xFF0000L) | \
126 (((x) >> 8) & 0xFF00L) | (((x) >> 24) & 0xFF))
127
128#define SWAPWORD(x) ((((x) << 8) & 0xFF00) | (((x) >> 8) & 0xFF))
129
130 /*
131 * These names may be embedded in overlay sections to identify which
132 * node phase the section should be overlayed.
133 */
134#define PCREATE "create"
135#define PDELETE "delete"
136#define PEXECUTE "execute"
137
138#define IS_EQUAL_UUID(uuid1, uuid2) (\
139 ((uuid1).ul_data1 == (uuid2).ul_data1) && \
140 ((uuid1).us_data2 == (uuid2).us_data2) && \
141 ((uuid1).us_data3 == (uuid2).us_data3) && \
142 ((uuid1).uc_data4 == (uuid2).uc_data4) && \
143 ((uuid1).uc_data5 == (uuid2).uc_data5) && \
144 (strncmp((void *)(uuid1).uc_data6, (void *)(uuid2).uc_data6, 6)) == 0)
145
/*
 *  ======== mem_seg_info ========
 *  Format of dynamic loading memory segment info in coff file.
 *  Must match dynm.h55.
 *  Parsed out of the DYNMEMSECT section of the base image in nldr_create().
 */
struct mem_seg_info {
	u32 segid;		/* Dynamic loading memory segment number */
	u32 base;		/* Segment base address (DSP-side) */
	u32 len;		/* Segment length */
	u32 type;		/* Mask of DYNM_CODE, DYNM_INTERNAL, etc. */
};
157
/*
 *  ======== lib_node ========
 *  For maintaining a tree of library dependencies.
 *  Each node holds one loaded library plus the array of libraries it
 *  directly depends on (recursively forming the dependency tree).
 */
struct lib_node {
	struct dbll_library_obj *lib;	/* The library */
	u16 dep_libs;		/* Number of dependent libraries */
	struct lib_node *dep_libs_tree;	/* Dependent libraries of lib */
};
167
/*
 *  ======== ovly_sect ========
 *  Information needed to overlay a section.
 *  Singly linked list node; an overlay section is one whose load address
 *  differs from its run address (see add_ovly_info()).
 */
struct ovly_sect {
	struct ovly_sect *next_sect;	/* Next section in the phase list */
	u32 sect_load_addr;	/* Load address of section */
	u32 sect_run_addr;	/* Run address of section */
	u32 size;		/* Size of section */
	u16 page;		/* DBL_CODE, DBL_DATA */
};
179
/*
 *  ======== ovly_node ========
 *  For maintaining a list of overlay nodes, with sections that need to be
 *  overlayed for each of the nodes phases.
 *  The *_sects counters give the lengths of the corresponding lists; the
 *  *_ref counters track how many clients currently have that phase loaded.
 */
struct ovly_node {
	struct dsp_uuid uuid;	/* Node UUID from the DCD database */
	char *node_name;	/* Heap copy of the node name (see add_ovly_node) */
	struct ovly_sect *create_sects_list;	/* Sections for create phase */
	struct ovly_sect *delete_sects_list;	/* Sections for delete phase */
	struct ovly_sect *execute_sects_list;	/* Sections for execute phase */
	struct ovly_sect *other_sects_list;	/* Sections for no known phase */
	u16 create_sects;	/* Length of create_sects_list */
	u16 delete_sects;	/* Length of delete_sects_list */
	u16 execute_sects;	/* Length of execute_sects_list */
	u16 other_sects;	/* Length of other_sects_list */
	u16 create_ref;		/* Load refcount for create phase */
	u16 delete_ref;		/* Load refcount for delete phase */
	u16 execute_ref;	/* Load refcount for execute phase */
	u16 other_ref;		/* Load refcount for "other" sections */
};
201
/*
 *  ======== nldr_object ========
 *  Overlay loader object.  One per device; created by nldr_create() and
 *  destroyed by nldr_delete().
 */
struct nldr_object {
	struct dev_object *hdev_obj;	/* Device object */
	struct dcd_manager *hdcd_mgr;	/* Proc/Node data manager */
	struct dbll_tar_obj *dbll;	/* The DBL loader */
	struct dbll_library_obj *base_lib;	/* Base image library */
	struct rmm_target_obj *rmm;	/* Remote memory manager for DSP */
	struct dbll_fxns ldr_fxns;	/* Loader function table */
	struct dbll_attrs ldr_attrs;	/* attrs to pass to loader functions */
	nldr_ovlyfxn ovly_fxn;	/* "write" for overlay nodes */
	nldr_writefxn write_fxn;	/* "write" for dynamic nodes */
	struct ovly_node *ovly_table;	/* Table of overlay nodes */
	u16 ovly_nodes;		/* Number of overlay nodes in base */
	u16 ovly_nid;		/* Index for tracking overlay nodes */
	u16 dload_segs;		/* Number of dynamic load mem segs */
	u32 *seg_table;		/* memtypes of dynamic memory segs
				 * indexed by segid
				 */
	u16 us_dsp_mau_size;	/* Size of DSP MAU */
	u16 us_dsp_word_size;	/* Size of DSP word */
};
226
/*
 *  ======== nldr_nodeobject ========
 *  Dynamic node object. This object is created when a node is allocated
 *  (nldr_allocate()) and freed when the node is deallocated.
 */
struct nldr_nodeobject {
	struct nldr_object *nldr_obj;	/* Dynamic loader handle */
	void *priv_ref;		/* Handle to pass to dbl_write_fxn */
	struct dsp_uuid uuid;	/* Node's UUID */
	bool dynamic;		/* Dynamically loaded node? */
	bool overlay;		/* Overlay node? */
	bool *pf_phase_split;	/* Multiple phase libraries? (caller-owned) */
	struct lib_node root;	/* Library containing node phase */
	struct lib_node create_lib;	/* Library with create phase lib */
	struct lib_node execute_lib;	/* Library with execute phase lib */
	struct lib_node delete_lib;	/* Library with delete phase lib */
	/* libs remain loaded until Delete */
	struct lib_node pers_lib_table[MAXLIBS];
	s32 pers_libs;		/* Number of persistent libraries */
	/* Path in lib dependency tree (cycle detection in load_lib) */
	struct dbll_library_obj *lib_path[MAXDEPTH + 1];
	enum nldr_phase phase;	/* Node phase currently being loaded */

	/*
	 * Dynamic loading memory segments for data and code of each phase,
	 * indexed by the *FLAGBIT constants (CREATEDATAFLAGBIT, ...).
	 */
	u16 seg_id[MAXFLAGS];

	/*
	 * Mask indicating whether each mem segment specified in seg_id[]
	 * is preferred or required.
	 * For example
	 *      if (code_data_flag_mask & (1 << EXECUTEDATAFLAGBIT)) != 0,
	 * then it is required to load execute phase data into the memory
	 * specified by seg_id[EXECUTEDATAFLAGBIT].
	 */
	u32 code_data_flag_mask;
};
264
/* Dynamic loader function table, copied into each nldr_object at create
 * time so individual entries (alloc/free/write) can be overridden.
 * NOTE(review): these casts assume each dbll_* function's signature matches
 * the corresponding dbll_*_fxn typedef exactly - calling through a
 * mismatched function-pointer type would be undefined behavior; confirm
 * against dspbridge/dbll.h before removing or changing the casts. */
static struct dbll_fxns ldr_fxns = {
	(dbll_close_fxn) dbll_close,
	(dbll_create_fxn) dbll_create,
	(dbll_delete_fxn) dbll_delete,
	(dbll_exit_fxn) dbll_exit,
	(dbll_get_attrs_fxn) dbll_get_attrs,
	(dbll_get_addr_fxn) dbll_get_addr,
	(dbll_get_c_addr_fxn) dbll_get_c_addr,
	(dbll_get_sect_fxn) dbll_get_sect,
	(dbll_init_fxn) dbll_init,
	(dbll_load_fxn) dbll_load,
	(dbll_load_sect_fxn) dbll_load_sect,
	(dbll_open_fxn) dbll_open,
	(dbll_read_sect_fxn) dbll_read_sect,
	(dbll_set_attrs_fxn) dbll_set_attrs,
	(dbll_unload_fxn) dbll_unload,
	(dbll_unload_sect_fxn) dbll_unload_sect,
};
284
285static u32 refs; /* module reference count */
286
287static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
288 u32 addr, u32 bytes);
289static int add_ovly_node(struct dsp_uuid *uuid_obj,
9d7d0a52 290 enum dsp_dcdobjtype obj_type, void *handle);
7d55524d 291static int add_ovly_sect(struct nldr_object *nldr_obj,
daa89e6c 292 struct ovly_sect **lst,
13b18c29 293 struct dbll_sect_info *sect_inf,
a5120278 294 bool *exists, u32 addr, u32 bytes);
b301c858 295static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
7d55524d
ORL
296 s32 mtype);
297static void free_sects(struct nldr_object *nldr_obj,
298 struct ovly_sect *phase_sects, u16 alloc_num);
299static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
0cd343a4 300 char *sym_name, struct dbll_sym_val **sym);
7d55524d
ORL
301static int load_lib(struct nldr_nodeobject *nldr_node_obj,
302 struct lib_node *root, struct dsp_uuid uuid,
318b5df9 303 bool root_prstnt,
7d55524d
ORL
304 struct dbll_library_obj **lib_path,
305 enum nldr_phase phase, u16 depth);
306static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
307 enum nldr_phase phase);
c8c1ad8c 308static int remote_alloc(void **ref, u16 mem_sect, u32 size,
b301c858 309 u32 align, u32 *dsp_address,
21aaf42e
MN
310 s32 segmnt_id,
311 s32 req, bool reserve);
13b18c29 312static int remote_free(void **ref, u16 space, u32 dsp_address, u32 size,
7d55524d
ORL
313 bool reserve);
314
315static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
316 struct lib_node *root);
317static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
318 enum nldr_phase phase);
319static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
320 struct dbll_library_obj *lib);
321static u32 find_lcm(u32 a, u32 b);
322static u32 find_gcf(u32 a, u32 b);
323
/*
 *  ======== nldr_allocate ========
 *  Allocate a loader node object for one node and, for dynamically loaded
 *  nodes, unpack its per-phase memory-segment preferences from the packed
 *  DCD masks (see the mask format description at the top of this file).
 *
 *  Parameters:
 *      nldr_obj:       Loader object created by nldr_create().
 *      priv_ref:       Opaque handle stored for later write callbacks.
 *      node_props:     Node properties read from the DCD database.
 *      nldr_nodeobj:   Out: new node object; NULL on failure.
 *      pf_phase_split: Caller-owned flag; the stored pointer is consulted
 *                      later to decide if phases live in separate libraries.
 *  Returns:
 *      0 on success, -ENOMEM if allocation fails.
 */
int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
		  const struct dcd_nodeprops *node_props,
		  struct nldr_nodeobject **nldr_nodeobj,
		  bool *pf_phase_split)
{
	struct nldr_nodeobject *nldr_node_obj = NULL;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(node_props != NULL);
	DBC_REQUIRE(nldr_nodeobj != NULL);
	DBC_REQUIRE(nldr_obj);

	/* Initialize handle in case of failure */
	*nldr_nodeobj = NULL;
	/* Allocate node object */
	nldr_node_obj = kzalloc(sizeof(struct nldr_nodeobject), GFP_KERNEL);

	if (nldr_node_obj == NULL) {
		status = -ENOMEM;
	} else {
		nldr_node_obj->pf_phase_split = pf_phase_split;
		nldr_node_obj->pers_libs = 0;
		nldr_node_obj->nldr_obj = nldr_obj;
		nldr_node_obj->priv_ref = priv_ref;
		/* Save node's UUID. */
		nldr_node_obj->uuid = node_props->ndb_props.ui_node_id;
		/*
		 * Determine if node is a dynamically loaded node from
		 * ndb_props.
		 */
		if (node_props->us_load_type == NLDR_DYNAMICLOAD) {
			/* Dynamic node */
			nldr_node_obj->dynamic = true;
			/*
			 * Extract memory requirements from ndb_props masks.
			 * For each phase: bits [5:0] at the phase's bit
			 * offset hold the segid, and the bit at
			 * (offset + FLAGBIT) is 1 if that segment is
			 * required, 0 if merely preferred.
			 */
			/* Create phase */
			nldr_node_obj->seg_id[CREATEDATAFLAGBIT] = (u16)
			    (node_props->ul_data_mem_seg_mask >> CREATEBIT) &
			    SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->ul_data_mem_seg_mask >>
			      (CREATEBIT + FLAGBIT)) & 1) << CREATEDATAFLAGBIT;
			nldr_node_obj->seg_id[CREATECODEFLAGBIT] = (u16)
			    (node_props->ul_code_mem_seg_mask >>
			     CREATEBIT) & SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->ul_code_mem_seg_mask >>
			      (CREATEBIT + FLAGBIT)) & 1) << CREATECODEFLAGBIT;
			/* Execute phase */
			nldr_node_obj->seg_id[EXECUTEDATAFLAGBIT] = (u16)
			    (node_props->ul_data_mem_seg_mask >>
			     EXECUTEBIT) & SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->ul_data_mem_seg_mask >>
			      (EXECUTEBIT + FLAGBIT)) & 1) <<
			    EXECUTEDATAFLAGBIT;
			nldr_node_obj->seg_id[EXECUTECODEFLAGBIT] = (u16)
			    (node_props->ul_code_mem_seg_mask >>
			     EXECUTEBIT) & SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->ul_code_mem_seg_mask >>
			      (EXECUTEBIT + FLAGBIT)) & 1) <<
			    EXECUTECODEFLAGBIT;
			/* Delete phase */
			nldr_node_obj->seg_id[DELETEDATAFLAGBIT] = (u16)
			    (node_props->ul_data_mem_seg_mask >> DELETEBIT) &
			    SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->ul_data_mem_seg_mask >>
			      (DELETEBIT + FLAGBIT)) & 1) << DELETEDATAFLAGBIT;
			nldr_node_obj->seg_id[DELETECODEFLAGBIT] = (u16)
			    (node_props->ul_code_mem_seg_mask >>
			     DELETEBIT) & SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->ul_code_mem_seg_mask >>
			      (DELETEBIT + FLAGBIT)) & 1) << DELETECODEFLAGBIT;
		} else {
			/* Non-dynamically loaded nodes are part of the
			 * base image */
			nldr_node_obj->root.lib = nldr_obj->base_lib;
			/* Check for overlay node */
			if (node_props->us_load_type == NLDR_OVLYLOAD)
				nldr_node_obj->overlay = true;

		}
		*nldr_nodeobj = (struct nldr_nodeobject *)nldr_node_obj;
	}
	/* Cleanup on failure */
	if (DSP_FAILED(status) && nldr_node_obj)
		kfree(nldr_node_obj);

	DBC_ENSURE((DSP_SUCCEEDED(status) && *nldr_nodeobj)
		   || (DSP_FAILED(status) && *nldr_nodeobj == NULL));
	return status;
}
424
425/*
426 * ======== nldr_create ========
427 */
e6bf74f0 428int nldr_create(struct nldr_object **nldr,
7d55524d 429 struct dev_object *hdev_obj,
9d7d0a52 430 const struct nldr_attrs *pattrs)
7d55524d
ORL
431{
432 struct cod_manager *cod_mgr; /* COD manager */
433 char *psz_coff_buf = NULL;
434 char sz_zl_file[COD_MAXPATHLENGTH];
435 struct nldr_object *nldr_obj = NULL;
436 struct dbll_attrs save_attrs;
437 struct dbll_attrs new_attrs;
438 dbll_flags flags;
439 u32 ul_entry;
440 u16 dload_segs = 0;
441 struct mem_seg_info *mem_info_obj;
442 u32 ul_len = 0;
443 u32 ul_addr;
444 struct rmm_segment *rmm_segs = NULL;
445 u16 i;
446 int status = 0;
447 DBC_REQUIRE(refs > 0);
daa89e6c 448 DBC_REQUIRE(nldr != NULL);
7d55524d
ORL
449 DBC_REQUIRE(hdev_obj != NULL);
450 DBC_REQUIRE(pattrs != NULL);
451 DBC_REQUIRE(pattrs->pfn_ovly != NULL);
452 DBC_REQUIRE(pattrs->pfn_write != NULL);
453
454 /* Allocate dynamic loader object */
455 nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
456 if (nldr_obj) {
457 nldr_obj->hdev_obj = hdev_obj;
458 /* warning, lazy status checking alert! */
459 dev_get_cod_mgr(hdev_obj, &cod_mgr);
460 if (cod_mgr) {
461 status = cod_get_loader(cod_mgr, &nldr_obj->dbll);
462 DBC_ASSERT(DSP_SUCCEEDED(status));
463 status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib);
464 DBC_ASSERT(DSP_SUCCEEDED(status));
465 status =
466 cod_get_base_name(cod_mgr, sz_zl_file,
467 COD_MAXPATHLENGTH);
468 DBC_ASSERT(DSP_SUCCEEDED(status));
469 }
470 status = 0;
471 /* end lazy status checking */
472 nldr_obj->us_dsp_mau_size = pattrs->us_dsp_mau_size;
473 nldr_obj->us_dsp_word_size = pattrs->us_dsp_word_size;
474 nldr_obj->ldr_fxns = ldr_fxns;
475 if (!(nldr_obj->ldr_fxns.init_fxn()))
476 status = -ENOMEM;
477
478 } else {
479 status = -ENOMEM;
480 }
481 /* Create the DCD Manager */
482 if (DSP_SUCCEEDED(status))
483 status = dcd_create_manager(NULL, &nldr_obj->hdcd_mgr);
484
485 /* Get dynamic loading memory sections from base lib */
486 if (DSP_SUCCEEDED(status)) {
487 status =
488 nldr_obj->ldr_fxns.get_sect_fxn(nldr_obj->base_lib,
489 DYNMEMSECT, &ul_addr,
490 &ul_len);
491 if (DSP_SUCCEEDED(status)) {
492 psz_coff_buf =
493 kzalloc(ul_len * nldr_obj->us_dsp_mau_size,
494 GFP_KERNEL);
495 if (!psz_coff_buf)
496 status = -ENOMEM;
497 } else {
498 /* Ok to not have dynamic loading memory */
499 status = 0;
500 ul_len = 0;
501 dev_dbg(bridge, "%s: failed - no dynamic loading mem "
502 "segments: 0x%x\n", __func__, status);
503 }
504 }
505 if (DSP_SUCCEEDED(status) && ul_len > 0) {
506 /* Read section containing dynamic load mem segments */
507 status =
508 nldr_obj->ldr_fxns.read_sect_fxn(nldr_obj->base_lib,
509 DYNMEMSECT, psz_coff_buf,
510 ul_len);
511 }
512 if (DSP_SUCCEEDED(status) && ul_len > 0) {
513 /* Parse memory segment data */
514 dload_segs = (u16) (*((u32 *) psz_coff_buf));
515 if (dload_segs > MAXMEMSEGS)
516 status = -EBADF;
517 }
518 /* Parse dynamic load memory segments */
519 if (DSP_SUCCEEDED(status) && dload_segs > 0) {
520 rmm_segs = kzalloc(sizeof(struct rmm_segment) * dload_segs,
521 GFP_KERNEL);
522 nldr_obj->seg_table =
523 kzalloc(sizeof(u32) * dload_segs, GFP_KERNEL);
524 if (rmm_segs == NULL || nldr_obj->seg_table == NULL) {
525 status = -ENOMEM;
526 } else {
527 nldr_obj->dload_segs = dload_segs;
528 mem_info_obj = (struct mem_seg_info *)(psz_coff_buf +
529 sizeof(u32));
530 for (i = 0; i < dload_segs; i++) {
531 rmm_segs[i].base = (mem_info_obj + i)->base;
532 rmm_segs[i].length = (mem_info_obj + i)->len;
533 rmm_segs[i].space = 0;
534 nldr_obj->seg_table[i] =
535 (mem_info_obj + i)->type;
536 dev_dbg(bridge,
537 "(proc) DLL MEMSEGMENT: %d, "
538 "Base: 0x%x, Length: 0x%x\n", i,
539 rmm_segs[i].base, rmm_segs[i].length);
540 }
541 }
542 }
543 /* Create Remote memory manager */
544 if (DSP_SUCCEEDED(status))
545 status = rmm_create(&nldr_obj->rmm, rmm_segs, dload_segs);
546
547 if (DSP_SUCCEEDED(status)) {
548 /* set the alloc, free, write functions for loader */
549 nldr_obj->ldr_fxns.get_attrs_fxn(nldr_obj->dbll, &save_attrs);
550 new_attrs = save_attrs;
551 new_attrs.alloc = (dbll_alloc_fxn) remote_alloc;
552 new_attrs.free = (dbll_free_fxn) remote_free;
553 new_attrs.sym_lookup = (dbll_sym_lookup) get_symbol_value;
554 new_attrs.sym_handle = nldr_obj;
555 new_attrs.write = (dbll_write_fxn) pattrs->pfn_write;
556 nldr_obj->ovly_fxn = pattrs->pfn_ovly;
557 nldr_obj->write_fxn = pattrs->pfn_write;
558 nldr_obj->ldr_attrs = new_attrs;
559 }
560 kfree(rmm_segs);
561
562 kfree(psz_coff_buf);
563
564 /* Get overlay nodes */
565 if (DSP_SUCCEEDED(status)) {
566 status =
567 cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH);
568 /* lazy check */
569 DBC_ASSERT(DSP_SUCCEEDED(status));
570 /* First count number of overlay nodes */
571 status =
572 dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
573 add_ovly_node, (void *)nldr_obj);
574 /* Now build table of overlay nodes */
575 if (DSP_SUCCEEDED(status) && nldr_obj->ovly_nodes > 0) {
576 /* Allocate table for overlay nodes */
577 nldr_obj->ovly_table =
578 kzalloc(sizeof(struct ovly_node) *
579 nldr_obj->ovly_nodes, GFP_KERNEL);
580 /* Put overlay nodes in the table */
581 nldr_obj->ovly_nid = 0;
582 status = dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
583 add_ovly_node,
584 (void *)nldr_obj);
585 }
586 }
587 /* Do a fake reload of the base image to get overlay section info */
588 if (DSP_SUCCEEDED(status) && nldr_obj->ovly_nodes > 0) {
589 save_attrs.write = fake_ovly_write;
590 save_attrs.log_write = add_ovly_info;
591 save_attrs.log_write_handle = nldr_obj;
592 flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB;
593 status = nldr_obj->ldr_fxns.load_fxn(nldr_obj->base_lib, flags,
594 &save_attrs, &ul_entry);
595 }
596 if (DSP_SUCCEEDED(status)) {
daa89e6c 597 *nldr = (struct nldr_object *)nldr_obj;
7d55524d
ORL
598 } else {
599 if (nldr_obj)
600 nldr_delete((struct nldr_object *)nldr_obj);
601
daa89e6c 602 *nldr = NULL;
7d55524d
ORL
603 }
604 /* FIXME:Temp. Fix. Must be removed */
daa89e6c
RS
605 DBC_ENSURE((DSP_SUCCEEDED(status) && *nldr)
606 || (DSP_FAILED(status) && (*nldr == NULL)));
7d55524d
ORL
607 return status;
608}
609
610/*
611 * ======== nldr_delete ========
612 */
613void nldr_delete(struct nldr_object *nldr_obj)
614{
615 struct ovly_sect *ovly_section;
616 struct ovly_sect *next;
617 u16 i;
618 DBC_REQUIRE(refs > 0);
619 DBC_REQUIRE(nldr_obj);
620
621 nldr_obj->ldr_fxns.exit_fxn();
622 if (nldr_obj->rmm)
623 rmm_delete(nldr_obj->rmm);
624
625 kfree(nldr_obj->seg_table);
626
627 if (nldr_obj->hdcd_mgr)
628 dcd_destroy_manager(nldr_obj->hdcd_mgr);
629
630 /* Free overlay node information */
631 if (nldr_obj->ovly_table) {
632 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
633 ovly_section =
634 nldr_obj->ovly_table[i].create_sects_list;
635 while (ovly_section) {
636 next = ovly_section->next_sect;
637 kfree(ovly_section);
638 ovly_section = next;
639 }
640 ovly_section =
641 nldr_obj->ovly_table[i].delete_sects_list;
642 while (ovly_section) {
643 next = ovly_section->next_sect;
644 kfree(ovly_section);
645 ovly_section = next;
646 }
647 ovly_section =
648 nldr_obj->ovly_table[i].execute_sects_list;
649 while (ovly_section) {
650 next = ovly_section->next_sect;
651 kfree(ovly_section);
652 ovly_section = next;
653 }
654 ovly_section = nldr_obj->ovly_table[i].other_sects_list;
655 while (ovly_section) {
656 next = ovly_section->next_sect;
657 kfree(ovly_section);
658 ovly_section = next;
659 }
660 }
661 kfree(nldr_obj->ovly_table);
662 }
663 kfree(nldr_obj);
664}
665
666/*
667 * ======== nldr_exit ========
668 * Discontinue usage of NLDR module.
669 */
670void nldr_exit(void)
671{
672 DBC_REQUIRE(refs > 0);
673
674 refs--;
675
676 if (refs == 0)
677 rmm_exit();
678
679 DBC_ENSURE(refs >= 0);
680}
681
/*
 *  ======== nldr_get_fxn_addr ========
 *  Resolve a function name to its DSP address for the node's currently
 *  loaded phase.  Search order: the phase's root library (C-name lookup,
 *  then plain symbol lookup), then each dependent library, then the node's
 *  persistent libraries.
 *
 *  Parameters:
 *      nldr_node_obj:  Node whose libraries are searched.
 *      str_fxn:        Function name to look up.
 *      addr:           Out: symbol value on success.
 *  Returns:
 *      0 on success, -ESPIPE if the symbol is not found anywhere.
 */
int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
		      char *str_fxn, u32 *addr)
{
	struct dbll_sym_val *dbll_sym;
	struct nldr_object *nldr_obj;
	int status = 0;
	bool status1 = false;	/* true once the symbol has been found */
	s32 i = 0;
	struct lib_node root = { NULL, 0, NULL };

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(nldr_node_obj);
	DBC_REQUIRE(addr != NULL);
	DBC_REQUIRE(str_fxn != NULL);

	nldr_obj = nldr_node_obj->nldr_obj;
	/* Called from node_create(), node_delete(), or node_run(). */
	if (nldr_node_obj->dynamic && *nldr_node_obj->pf_phase_split) {
		/* Phase-split node: pick the library for the current phase */
		switch (nldr_node_obj->phase) {
		case NLDR_CREATE:
			root = nldr_node_obj->create_lib;
			break;
		case NLDR_EXECUTE:
			root = nldr_node_obj->execute_lib;
			break;
		case NLDR_DELETE:
			root = nldr_node_obj->delete_lib;
			break;
		default:
			DBC_ASSERT(false);
			break;
		}
	} else {
		/* for Overlay nodes or non-split Dynamic nodes */
		root = nldr_node_obj->root;
	}
	/* Try the root library: C-mangled name first, then raw symbol.
	 * (Presumably get_c_addr_fxn applies the C naming convention;
	 * verify against dspbridge/dbll.h.) */
	status1 =
	    nldr_obj->ldr_fxns.get_c_addr_fxn(root.lib, str_fxn, &dbll_sym);
	if (!status1)
		status1 =
		    nldr_obj->ldr_fxns.get_addr_fxn(root.lib, str_fxn,
						    &dbll_sym);

	/* If symbol not found, check dependent libraries */
	if (!status1) {
		for (i = 0; i < root.dep_libs; i++) {
			/* Note: dep-lib search tries the raw lookup before
			 * the C-name lookup (opposite of the root order). */
			status1 =
			    nldr_obj->ldr_fxns.get_addr_fxn(root.dep_libs_tree
							    [i].lib, str_fxn,
							    &dbll_sym);
			if (!status1) {
				status1 =
				    nldr_obj->ldr_fxns.
				    get_c_addr_fxn(root.dep_libs_tree[i].lib,
						   str_fxn, &dbll_sym);
			}
			if (status1) {
				/* Symbol found */
				break;
			}
		}
	}
	/* Check persistent libraries */
	if (!status1) {
		for (i = 0; i < nldr_node_obj->pers_libs; i++) {
			status1 =
			    nldr_obj->ldr_fxns.
			    get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
					 str_fxn, &dbll_sym);
			if (!status1) {
				status1 =
				    nldr_obj->ldr_fxns.
				    get_c_addr_fxn(nldr_node_obj->pers_lib_table
						   [i].lib, str_fxn, &dbll_sym);
			}
			if (status1) {
				/* Symbol found */
				break;
			}
		}
	}

	if (status1)
		*addr = dbll_sym->value;
	else
		status = -ESPIPE;

	return status;
}
773
774/*
775 * ======== nldr_get_rmm_manager ========
776 * Given a NLDR object, retrieve RMM Manager Handle
777 */
e6890692 778int nldr_get_rmm_manager(struct nldr_object *nldr,
e6bf74f0 779 struct rmm_target_obj **rmm_mgr)
7d55524d
ORL
780{
781 int status = 0;
e6890692 782 struct nldr_object *nldr_obj = nldr;
daa89e6c 783 DBC_REQUIRE(rmm_mgr != NULL);
7d55524d 784
e6890692 785 if (nldr) {
daa89e6c 786 *rmm_mgr = nldr_obj->rmm;
7d55524d 787 } else {
daa89e6c 788 *rmm_mgr = NULL;
7d55524d
ORL
789 status = -EFAULT;
790 }
791
daa89e6c
RS
792 DBC_ENSURE(DSP_SUCCEEDED(status) || ((rmm_mgr != NULL) &&
793 (*rmm_mgr == NULL)));
7d55524d
ORL
794
795 return status;
796}
797
798/*
799 * ======== nldr_init ========
800 * Initialize the NLDR module.
801 */
802bool nldr_init(void)
803{
804 DBC_REQUIRE(refs >= 0);
805
806 if (refs == 0)
807 rmm_init();
808
809 refs++;
810
811 DBC_ENSURE(refs > 0);
812 return true;
813}
814
815/*
816 * ======== nldr_load ========
817 */
818int nldr_load(struct nldr_nodeobject *nldr_node_obj,
819 enum nldr_phase phase)
820{
821 struct nldr_object *nldr_obj;
822 struct dsp_uuid lib_uuid;
823 int status = 0;
824
825 DBC_REQUIRE(refs > 0);
826 DBC_REQUIRE(nldr_node_obj);
827
828 nldr_obj = nldr_node_obj->nldr_obj;
829
830 if (nldr_node_obj->dynamic) {
831 nldr_node_obj->phase = phase;
832
833 lib_uuid = nldr_node_obj->uuid;
834
835 /* At this point, we may not know if node is split into
836 * different libraries. So we'll go ahead and load the
837 * library, and then save the pointer to the appropriate
838 * location after we know. */
839
840 status =
841 load_lib(nldr_node_obj, &nldr_node_obj->root, lib_uuid,
842 false, nldr_node_obj->lib_path, phase, 0);
843
844 if (DSP_SUCCEEDED(status)) {
845 if (*nldr_node_obj->pf_phase_split) {
846 switch (phase) {
847 case NLDR_CREATE:
848 nldr_node_obj->create_lib =
849 nldr_node_obj->root;
850 break;
851
852 case NLDR_EXECUTE:
853 nldr_node_obj->execute_lib =
854 nldr_node_obj->root;
855 break;
856
857 case NLDR_DELETE:
858 nldr_node_obj->delete_lib =
859 nldr_node_obj->root;
860 break;
861
862 default:
863 DBC_ASSERT(false);
864 break;
865 }
866 }
867 }
868 } else {
869 if (nldr_node_obj->overlay)
870 status = load_ovly(nldr_node_obj, phase);
871
872 }
873
874 return status;
875}
876
877/*
878 * ======== nldr_unload ========
879 */
880int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
881 enum nldr_phase phase)
882{
883 int status = 0;
884 struct lib_node *root_lib = NULL;
885 s32 i = 0;
886
887 DBC_REQUIRE(refs > 0);
888 DBC_REQUIRE(nldr_node_obj);
889
890 if (nldr_node_obj != NULL) {
891 if (nldr_node_obj->dynamic) {
892 if (*nldr_node_obj->pf_phase_split) {
893 switch (phase) {
894 case NLDR_CREATE:
895 root_lib = &nldr_node_obj->create_lib;
896 break;
897 case NLDR_EXECUTE:
898 root_lib = &nldr_node_obj->execute_lib;
899 break;
900 case NLDR_DELETE:
901 root_lib = &nldr_node_obj->delete_lib;
902 /* Unload persistent libraries */
903 for (i = 0;
904 i < nldr_node_obj->pers_libs;
905 i++) {
906 unload_lib(nldr_node_obj,
907 &nldr_node_obj->
908 pers_lib_table[i]);
909 }
910 nldr_node_obj->pers_libs = 0;
911 break;
912 default:
913 DBC_ASSERT(false);
914 break;
915 }
916 } else {
917 /* Unload main library */
918 root_lib = &nldr_node_obj->root;
919 }
920 if (root_lib)
921 unload_lib(nldr_node_obj, root_lib);
922 } else {
923 if (nldr_node_obj->overlay)
924 unload_ovly(nldr_node_obj, phase);
925
926 }
927 }
928 return status;
929}
930
931/*
932 * ======== add_ovly_info ========
933 */
934static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
935 u32 addr, u32 bytes)
936{
937 char *node_name;
938 char *sect_name = (char *)sect_info->name;
939 bool sect_exists = false;
940 char seps = ':';
941 char *pch;
942 u16 i;
943 struct nldr_object *nldr_obj = (struct nldr_object *)handle;
944 int status = 0;
945
946 /* Is this an overlay section (load address != run address)? */
947 if (sect_info->sect_load_addr == sect_info->sect_run_addr)
948 goto func_end;
949
950 /* Find the node it belongs to */
951 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
952 node_name = nldr_obj->ovly_table[i].node_name;
953 DBC_REQUIRE(node_name);
954 if (strncmp(node_name, sect_name + 1, strlen(node_name)) == 0) {
955 /* Found the node */
956 break;
957 }
958 }
959 if (!(i < nldr_obj->ovly_nodes))
960 goto func_end;
961
962 /* Determine which phase this section belongs to */
963 for (pch = sect_name + 1; *pch && *pch != seps; pch++)
964 ;;
965
966 if (*pch) {
967 pch++; /* Skip over the ':' */
968 if (strncmp(pch, PCREATE, strlen(PCREATE)) == 0) {
969 status =
970 add_ovly_sect(nldr_obj,
971 &nldr_obj->
972 ovly_table[i].create_sects_list,
973 sect_info, &sect_exists, addr, bytes);
974 if (DSP_SUCCEEDED(status) && !sect_exists)
975 nldr_obj->ovly_table[i].create_sects++;
976
977 } else if (strncmp(pch, PDELETE, strlen(PDELETE)) == 0) {
978 status =
979 add_ovly_sect(nldr_obj,
980 &nldr_obj->
981 ovly_table[i].delete_sects_list,
982 sect_info, &sect_exists, addr, bytes);
983 if (DSP_SUCCEEDED(status) && !sect_exists)
984 nldr_obj->ovly_table[i].delete_sects++;
985
986 } else if (strncmp(pch, PEXECUTE, strlen(PEXECUTE)) == 0) {
987 status =
988 add_ovly_sect(nldr_obj,
989 &nldr_obj->
990 ovly_table[i].execute_sects_list,
991 sect_info, &sect_exists, addr, bytes);
992 if (DSP_SUCCEEDED(status) && !sect_exists)
993 nldr_obj->ovly_table[i].execute_sects++;
994
995 } else {
996 /* Put in "other" sectins */
997 status =
998 add_ovly_sect(nldr_obj,
999 &nldr_obj->
1000 ovly_table[i].other_sects_list,
1001 sect_info, &sect_exists, addr, bytes);
1002 if (DSP_SUCCEEDED(status) && !sect_exists)
1003 nldr_obj->ovly_table[i].other_sects++;
1004
1005 }
1006 }
1007func_end:
1008 return status;
1009}
1010
/*
 *  ======== add_ovly_node =========
 *  Callback function passed to dcd_get_objects.
 *  Runs in two passes driven by nldr_create(): while ovly_table is NULL it
 *  only counts overlay nodes; once the table is allocated it records each
 *  overlay node's UUID and a heap copy of its name.
 */
static int add_ovly_node(struct dsp_uuid *uuid_obj,
			 enum dsp_dcdobjtype obj_type, void *handle)
{
	struct nldr_object *nldr_obj = (struct nldr_object *)handle;
	char *node_name = NULL;
	char *pbuf = NULL;
	u32 len;
	struct dcd_genericobj obj_def;
	int status = 0;

	/* Only node objects are of interest here */
	if (obj_type != DSP_DCDNODETYPE)
		goto func_end;

	status =
	    dcd_get_object_def(nldr_obj->hdcd_mgr, uuid_obj, obj_type,
			       &obj_def);
	if (DSP_FAILED(status))
		goto func_end;

	/* If overlay node, add to the list */
	if (obj_def.obj_data.node_obj.us_load_type == NLDR_OVLYLOAD) {
		if (nldr_obj->ovly_table == NULL) {
			/* Counting pass */
			nldr_obj->ovly_nodes++;
		} else {
			/* Add node to table */
			nldr_obj->ovly_table[nldr_obj->ovly_nid].uuid =
			    *uuid_obj;
			DBC_REQUIRE(obj_def.obj_data.node_obj.ndb_props.
				    ac_name);
			len =
			    strlen(obj_def.obj_data.node_obj.ndb_props.ac_name);
			node_name = obj_def.obj_data.node_obj.ndb_props.ac_name;
			/* kzalloc zero-fills, so the copy below stays
			 * NUL-terminated even though strncpy(.., len) does
			 * not write a terminator itself */
			pbuf = kzalloc(len + 1, GFP_KERNEL);
			if (pbuf == NULL) {
				status = -ENOMEM;
			} else {
				strncpy(pbuf, node_name, len);
				nldr_obj->ovly_table[nldr_obj->ovly_nid].
				    node_name = pbuf;
				nldr_obj->ovly_nid++;
			}
		}
	}
	/* These were allocated in dcd_get_object_def */
	kfree(obj_def.obj_data.node_obj.pstr_create_phase_fxn);

	kfree(obj_def.obj_data.node_obj.pstr_execute_phase_fxn);

	kfree(obj_def.obj_data.node_obj.pstr_delete_phase_fxn);

	kfree(obj_def.obj_data.node_obj.pstr_i_alg_name);

func_end:
	return status;
}
1070
1071/*
1072 * ======== add_ovly_sect ========
1073 */
1074static int add_ovly_sect(struct nldr_object *nldr_obj,
daa89e6c 1075 struct ovly_sect **lst,
13b18c29 1076 struct dbll_sect_info *sect_inf,
a5120278 1077 bool *exists, u32 addr, u32 bytes)
7d55524d
ORL
1078{
1079 struct ovly_sect *new_sect = NULL;
1080 struct ovly_sect *last_sect;
1081 struct ovly_sect *ovly_section;
1082 int status = 0;
1083
daa89e6c 1084 ovly_section = last_sect = *lst;
a5120278 1085 *exists = false;
7d55524d
ORL
1086 while (ovly_section) {
1087 /*
1088 * Make sure section has not already been added. Multiple
1089 * 'write' calls may be made to load the section.
1090 */
1091 if (ovly_section->sect_load_addr == addr) {
1092 /* Already added */
a5120278 1093 *exists = true;
7d55524d
ORL
1094 break;
1095 }
1096 last_sect = ovly_section;
1097 ovly_section = ovly_section->next_sect;
1098 }
1099
1100 if (!ovly_section) {
1101 /* New section */
1102 new_sect = kzalloc(sizeof(struct ovly_sect), GFP_KERNEL);
1103 if (new_sect == NULL) {
1104 status = -ENOMEM;
1105 } else {
1106 new_sect->sect_load_addr = addr;
13b18c29
RS
1107 new_sect->sect_run_addr = sect_inf->sect_run_addr +
1108 (addr - sect_inf->sect_load_addr);
7d55524d 1109 new_sect->size = bytes;
13b18c29 1110 new_sect->page = sect_inf->type;
7d55524d
ORL
1111 }
1112
1113 /* Add to the list */
1114 if (DSP_SUCCEEDED(status)) {
daa89e6c 1115 if (*lst == NULL) {
7d55524d 1116 /* First in the list */
daa89e6c 1117 *lst = new_sect;
7d55524d
ORL
1118 } else {
1119 last_sect->next_sect = new_sect;
1120 }
1121 }
1122 }
1123
1124 return status;
1125}
1126
1127/*
1128 * ======== fake_ovly_write ========
1129 */
b301c858 1130static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
7d55524d
ORL
1131 s32 mtype)
1132{
1133 return (s32) bytes;
1134}
1135
1136/*
1137 * ======== free_sects ========
1138 */
1139static void free_sects(struct nldr_object *nldr_obj,
1140 struct ovly_sect *phase_sects, u16 alloc_num)
1141{
1142 struct ovly_sect *ovly_section = phase_sects;
1143 u16 i = 0;
1144 bool ret;
1145
1146 while (ovly_section && i < alloc_num) {
1147 /* 'Deallocate' */
1148 /* segid - page not supported yet */
1149 /* Reserved memory */
1150 ret =
1151 rmm_free(nldr_obj->rmm, 0, ovly_section->sect_run_addr,
1152 ovly_section->size, true);
1153 DBC_ASSERT(ret);
1154 ovly_section = ovly_section->next_sect;
1155 i++;
1156 }
1157}
1158
1159/*
1160 * ======== get_symbol_value ========
1161 * Find symbol in library's base image. If not there, check dependent
1162 * libraries.
1163 */
1164static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
c8c1ad8c 1165 char *sym_name, struct dbll_sym_val **sym)
7d55524d
ORL
1166{
1167 struct nldr_object *nldr_obj = (struct nldr_object *)handle;
1168 struct nldr_nodeobject *nldr_node_obj =
1169 (struct nldr_nodeobject *)rmm_handle;
1170 struct lib_node *root = (struct lib_node *)parg;
1171 u16 i;
1172 bool status = false;
1173
1174 /* check the base image */
c8c1ad8c
RS
1175 status = nldr_obj->ldr_fxns.get_addr_fxn(nldr_obj->base_lib,
1176 sym_name, sym);
7d55524d
ORL
1177 if (!status)
1178 status =
c8c1ad8c
RS
1179 nldr_obj->ldr_fxns.get_c_addr_fxn(nldr_obj->base_lib,
1180 sym_name, sym);
7d55524d
ORL
1181
1182 /*
1183 * Check in root lib itself. If the library consists of
1184 * multiple object files linked together, some symbols in the
1185 * library may need to be resolved.
1186 */
1187 if (!status) {
c8c1ad8c
RS
1188 status = nldr_obj->ldr_fxns.get_addr_fxn(root->lib, sym_name,
1189 sym);
7d55524d
ORL
1190 if (!status) {
1191 status =
c8c1ad8c
RS
1192 nldr_obj->ldr_fxns.get_c_addr_fxn(root->lib,
1193 sym_name, sym);
7d55524d
ORL
1194 }
1195 }
1196
1197 /*
1198 * Check in root lib's dependent libraries, but not dependent
1199 * libraries' dependents.
1200 */
1201 if (!status) {
1202 for (i = 0; i < root->dep_libs; i++) {
1203 status =
c8c1ad8c
RS
1204 nldr_obj->ldr_fxns.get_addr_fxn(root->
1205 dep_libs_tree
1206 [i].lib,
1207 sym_name, sym);
7d55524d
ORL
1208 if (!status) {
1209 status =
1210 nldr_obj->ldr_fxns.
1211 get_c_addr_fxn(root->dep_libs_tree[i].lib,
c8c1ad8c 1212 sym_name, sym);
7d55524d
ORL
1213 }
1214 if (status) {
1215 /* Symbol found */
1216 break;
1217 }
1218 }
1219 }
1220 /*
1221 * Check in persistent libraries
1222 */
1223 if (!status) {
1224 for (i = 0; i < nldr_node_obj->pers_libs; i++) {
1225 status =
1226 nldr_obj->ldr_fxns.
1227 get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
c8c1ad8c 1228 sym_name, sym);
7d55524d
ORL
1229 if (!status) {
1230 status = nldr_obj->ldr_fxns.get_c_addr_fxn
c8c1ad8c
RS
1231 (nldr_node_obj->pers_lib_table[i].lib,
1232 sym_name, sym);
7d55524d
ORL
1233 }
1234 if (status) {
1235 /* Symbol found */
1236 break;
1237 }
1238 }
1239 }
1240
1241 return status;
1242}
1243
1244/*
1245 * ======== load_lib ========
1246 * Recursively load library and all its dependent libraries. The library
1247 * we're loading is specified by a uuid.
1248 */
static int load_lib(struct nldr_nodeobject *nldr_node_obj,
		    struct lib_node *root, struct dsp_uuid uuid,
		    bool root_prstnt,
		    struct dbll_library_obj **lib_path,
		    enum nldr_phase phase, u16 depth)
{
	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
	u16 nd_libs = 0;	/* Number of dependent libraries */
	u16 np_libs = 0;	/* Number of persistent libraries */
	u16 nd_libs_loaded = 0;	/* Number of dep. libraries loaded */
	u16 i;
	u32 entry;
	u32 dw_buf_size = NLDR_MAXPATHLENGTH;
	dbll_flags flags = DBLL_SYMB | DBLL_CODE | DBLL_DATA | DBLL_DYNAMIC;
	struct dbll_attrs new_attrs;
	char *psz_file_name = NULL;
	struct dsp_uuid *dep_lib_uui_ds = NULL;
	bool *persistent_dep_libs = NULL;
	int status = 0;
	bool lib_status = false;
	struct lib_node *dep_lib;

	/* Recursion guard: a dependency chain deeper than MAXDEPTH is a
	 * design error.  Note only a debug assert fires here; no error
	 * status is set and execution continues. */
	if (depth > MAXDEPTH) {
		/* Error */
		DBC_ASSERT(false);
	}
	root->lib = NULL;
	/* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */
	psz_file_name = kzalloc(DBLL_MAXPATHLENGTH, GFP_KERNEL);
	if (psz_file_name == NULL)
		status = -ENOMEM;

	if (DSP_SUCCEEDED(status)) {
		/* Get the name of the library.  depth == 0 means this is the
		 * node's own library, which is registered per phase. */
		if (depth == 0) {
			status =
			    dcd_get_library_name(nldr_node_obj->nldr_obj->
						 hdcd_mgr, &uuid, psz_file_name,
						 &dw_buf_size, phase,
						 nldr_node_obj->pf_phase_split);
		} else {
			/* Dependent libraries are registered with a phase */
			status =
			    dcd_get_library_name(nldr_node_obj->nldr_obj->
						 hdcd_mgr, &uuid, psz_file_name,
						 &dw_buf_size, NLDR_NOPHASE,
						 NULL);
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Open the library, don't load symbols */
		status =
		    nldr_obj->ldr_fxns.open_fxn(nldr_obj->dbll, psz_file_name,
						DBLL_NOLOAD, &root->lib);
	}
	/* Done with file name */
	kfree(psz_file_name);

	/* Check to see if library not already loaded: a persistent library
	 * already in the node's table is closed again and we return early. */
	if (DSP_SUCCEEDED(status) && root_prstnt) {
		lib_status =
		    find_in_persistent_lib_array(nldr_node_obj, root->lib);
		/* Close library */
		if (lib_status) {
			nldr_obj->ldr_fxns.close_fxn(root->lib);
			return 0;
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Check for circular dependencies: the current branch of the
		 * dependency tree is recorded in lib_path[0..depth-1]. */
		for (i = 0; i < depth; i++) {
			if (root->lib == lib_path[i]) {
				/* This condition could be checked by a
				 * tool at build time. */
				status = -EILSEQ;
			}
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Add library to current path in dependency tree */
		lib_path[depth] = root->lib;
		depth++;
		/* Get number of dependent libraries */
		status =
		    dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->hdcd_mgr,
					 &uuid, &nd_libs, &np_libs, phase);
	}
	DBC_ASSERT(nd_libs >= np_libs);
	if (DSP_SUCCEEDED(status)) {
		/* Without phase splitting, persistent libs are treated as
		 * ordinary dependents of this phase. */
		if (!(*nldr_node_obj->pf_phase_split))
			np_libs = 0;

		/* nd_libs = #of dependent libraries */
		root->dep_libs = nd_libs - np_libs;
		if (nd_libs > 0) {
			dep_lib_uui_ds = kzalloc(sizeof(struct dsp_uuid) *
						 nd_libs, GFP_KERNEL);
			persistent_dep_libs =
			    kzalloc(sizeof(bool) * nd_libs, GFP_KERNEL);
			if (!dep_lib_uui_ds || !persistent_dep_libs)
				status = -ENOMEM;

			if (root->dep_libs > 0) {
				/* Allocate arrays for dependent lib UUIDs,
				 * lib nodes */
				root->dep_libs_tree = kzalloc
				    (sizeof(struct lib_node) *
				     (root->dep_libs), GFP_KERNEL);
				if (!(root->dep_libs_tree))
					status = -ENOMEM;

			}

			if (DSP_SUCCEEDED(status)) {
				/* Get the dependent library UUIDs */
				status =
				    dcd_get_dep_libs(nldr_node_obj->
						     nldr_obj->hdcd_mgr, &uuid,
						     nd_libs, dep_lib_uui_ds,
						     persistent_dep_libs,
						     phase);
			}
		}
	}

	/*
	 * Recursively load dependent libraries.
	 */
	if (DSP_SUCCEEDED(status)) {
		for (i = 0; i < nd_libs; i++) {
			/* If root library is NOT persistent, and dep library
			 * is, then record it.  If root library IS persistent,
			 * the deplib is already included */
			if (!root_prstnt && persistent_dep_libs[i] &&
			    *nldr_node_obj->pf_phase_split) {
				if ((nldr_node_obj->pers_libs) >= MAXLIBS) {
					status = -EILSEQ;
					break;
				}

				/* Allocate library outside of phase */
				dep_lib =
				    &nldr_node_obj->pers_lib_table
				    [nldr_node_obj->pers_libs];
			} else {
				if (root_prstnt)
					persistent_dep_libs[i] = true;

				/* Allocate library within phase */
				dep_lib = &root->dep_libs_tree[nd_libs_loaded];
			}

			status = load_lib(nldr_node_obj, dep_lib,
					  dep_lib_uui_ds[i],
					  persistent_dep_libs[i], lib_path,
					  phase, depth);

			if (DSP_SUCCEEDED(status)) {
				/* Bump the matching counter so cleanup knows
				 * how many entries of each table are live. */
				if ((status != 0) &&
				    !root_prstnt && persistent_dep_libs[i] &&
				    *nldr_node_obj->pf_phase_split) {
					(nldr_node_obj->pers_libs)++;
				} else {
					if (!persistent_dep_libs[i] ||
					    !(*nldr_node_obj->pf_phase_split)) {
						nd_libs_loaded++;
					}
				}
			} else {
				break;
			}
		}
	}

	/* Now we can load the root library */
	if (DSP_SUCCEEDED(status)) {
		new_attrs = nldr_obj->ldr_attrs;
		new_attrs.sym_arg = root;
		new_attrs.rmm_handle = nldr_node_obj;
		new_attrs.input_params = nldr_node_obj->priv_ref;
		new_attrs.base_image = false;

		status =
		    nldr_obj->ldr_fxns.load_fxn(root->lib, flags, &new_attrs,
						&entry);
	}

	/*
	 * In case of failure, unload any dependent libraries that
	 * were loaded, and close the root library.
	 * (Persistent libraries are unloaded from the very top)
	 */
	if (DSP_FAILED(status)) {
		if (phase != NLDR_EXECUTE) {
			for (i = 0; i < nldr_node_obj->pers_libs; i++)
				unload_lib(nldr_node_obj,
					   &nldr_node_obj->pers_lib_table[i]);

			nldr_node_obj->pers_libs = 0;
		}
		for (i = 0; i < nd_libs_loaded; i++)
			unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);

		if (root->lib)
			nldr_obj->ldr_fxns.close_fxn(root->lib);

	}

	/* Going up one node in the dependency tree */
	depth--;

	kfree(dep_lib_uui_ds);
	dep_lib_uui_ds = NULL;

	kfree(persistent_dep_libs);
	persistent_dep_libs = NULL;

	return status;
}
1468
1469/*
1470 * ======== load_ovly ========
1471 */
static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
		     enum nldr_phase phase)
{
	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
	struct ovly_node *po_node = NULL;
	struct ovly_sect *phase_sects = NULL;
	struct ovly_sect *other_sects_list = NULL;
	u16 i;
	u16 alloc_num = 0;	/* phase sections successfully 'allocated' */
	u16 other_alloc = 0;	/* 'other' sections successfully 'allocated' */
	u16 *ref_count = NULL;
	u16 *other_ref = NULL;
	u32 bytes;
	struct ovly_sect *ovly_section;
	int status = 0;

	/* Find the node in the table */
	for (i = 0; i < nldr_obj->ovly_nodes; i++) {
		if (IS_EQUAL_UUID
		    (nldr_node_obj->uuid, nldr_obj->ovly_table[i].uuid)) {
			/* Found it */
			po_node = &(nldr_obj->ovly_table[i]);
			break;
		}
	}

	DBC_ASSERT(i < nldr_obj->ovly_nodes);

	if (!po_node) {
		status = -ENOENT;
		goto func_end;
	}

	/* Pick the section list and reference counter for this phase.
	 * 'Other' sections are handled together with the create phase. */
	switch (phase) {
	case NLDR_CREATE:
		ref_count = &(po_node->create_ref);
		other_ref = &(po_node->other_ref);
		phase_sects = po_node->create_sects_list;
		other_sects_list = po_node->other_sects_list;
		break;

	case NLDR_EXECUTE:
		ref_count = &(po_node->execute_ref);
		phase_sects = po_node->execute_sects_list;
		break;

	case NLDR_DELETE:
		ref_count = &(po_node->delete_ref);
		phase_sects = po_node->delete_sects_list;
		break;

	default:
		DBC_ASSERT(false);
		break;
	}

	if (ref_count == NULL)
		goto func_end;

	/* Already loaded for this phase: just bump the refcount at func_end */
	if (*ref_count != 0)
		goto func_end;

	/* 'Allocate' memory for overlay sections of this phase */
	ovly_section = phase_sects;
	while (ovly_section) {
		/* allocate *//* page not supported yet */
		/* reserve *//* align */
		status = rmm_alloc(nldr_obj->rmm, 0, ovly_section->size, 0,
				   &(ovly_section->sect_run_addr), true);
		if (DSP_SUCCEEDED(status)) {
			ovly_section = ovly_section->next_sect;
			alloc_num++;
		} else {
			break;
		}
	}
	if (other_ref && *other_ref == 0) {
		/* 'Allocate' memory for other overlay sections
		 * (create phase) */
		if (DSP_SUCCEEDED(status)) {
			ovly_section = other_sects_list;
			while (ovly_section) {
				/* page not supported *//* align */
				/* reserve */
				status =
				    rmm_alloc(nldr_obj->rmm, 0,
					      ovly_section->size, 0,
					      &(ovly_section->sect_run_addr),
					      true);
				if (DSP_SUCCEEDED(status)) {
					ovly_section = ovly_section->next_sect;
					other_alloc++;
				} else {
					break;
				}
			}
		}
	}
	if (*ref_count == 0) {
		if (DSP_SUCCEEDED(status)) {
			/* Load sections for this phase via the registered
			 * overlay-write callback; a short write is an error. */
			ovly_section = phase_sects;
			while (ovly_section && DSP_SUCCEEDED(status)) {
				bytes =
				    (*nldr_obj->ovly_fxn) (nldr_node_obj->
							   priv_ref,
							   ovly_section->
							   sect_run_addr,
							   ovly_section->
							   sect_load_addr,
							   ovly_section->size,
							   ovly_section->page);
				if (bytes != ovly_section->size)
					status = -EPERM;

				ovly_section = ovly_section->next_sect;
			}
		}
	}
	if (other_ref && *other_ref == 0) {
		if (DSP_SUCCEEDED(status)) {
			/* Load other sections (create phase) */
			ovly_section = other_sects_list;
			while (ovly_section && DSP_SUCCEEDED(status)) {
				bytes =
				    (*nldr_obj->ovly_fxn) (nldr_node_obj->
							   priv_ref,
							   ovly_section->
							   sect_run_addr,
							   ovly_section->
							   sect_load_addr,
							   ovly_section->size,
							   ovly_section->page);
				if (bytes != ovly_section->size)
					status = -EPERM;

				ovly_section = ovly_section->next_sect;
			}
		}
	}
	if (DSP_FAILED(status)) {
		/* 'Deallocate' memory: only the alloc_num/other_alloc
		 * sections that were actually reserved above */
		free_sects(nldr_obj, phase_sects, alloc_num);
		free_sects(nldr_obj, other_sects_list, other_alloc);
	}
func_end:
	if (DSP_SUCCEEDED(status) && (ref_count != NULL)) {
		*ref_count += 1;
		if (other_ref)
			*other_ref += 1;

	}

	return status;
}
1627
1628/*
1629 * ======== remote_alloc ========
1630 */
c8c1ad8c 1631static int remote_alloc(void **ref, u16 mem_sect, u32 size,
b301c858 1632 u32 align, u32 *dsp_address,
21aaf42e 1633 s32 segmnt_id, s32 req,
7d55524d
ORL
1634 bool reserve)
1635{
13b18c29 1636 struct nldr_nodeobject *hnode = (struct nldr_nodeobject *)ref;
7d55524d
ORL
1637 struct nldr_object *nldr_obj;
1638 struct rmm_target_obj *rmm;
1639 u16 mem_phase_bit = MAXFLAGS;
1640 u16 segid = 0;
1641 u16 i;
1642 u16 mem_sect_type;
1643 u32 word_size;
b301c858 1644 struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address;
7d55524d
ORL
1645 bool mem_load_req = false;
1646 int status = -ENOMEM; /* Set to fail */
1647 DBC_REQUIRE(hnode);
c8c1ad8c
RS
1648 DBC_REQUIRE(mem_sect == DBLL_CODE || mem_sect == DBLL_DATA ||
1649 mem_sect == DBLL_BSS);
7d55524d
ORL
1650 nldr_obj = hnode->nldr_obj;
1651 rmm = nldr_obj->rmm;
1652 /* Convert size to DSP words */
1653 word_size =
1654 (size + nldr_obj->us_dsp_word_size -
1655 1) / nldr_obj->us_dsp_word_size;
1656 /* Modify memory 'align' to account for DSP cache line size */
1657 align = find_lcm(GEM_CACHE_LINE_SIZE, align);
1658 dev_dbg(bridge, "%s: memory align to 0x%x\n", __func__, align);
0cd343a4
RS
1659 if (segmnt_id != -1) {
1660 rmm_addr_obj->segid = segmnt_id;
1661 segid = segmnt_id;
7d55524d
ORL
1662 mem_load_req = req;
1663 } else {
1664 switch (hnode->phase) {
1665 case NLDR_CREATE:
1666 mem_phase_bit = CREATEDATAFLAGBIT;
1667 break;
1668 case NLDR_DELETE:
1669 mem_phase_bit = DELETEDATAFLAGBIT;
1670 break;
1671 case NLDR_EXECUTE:
1672 mem_phase_bit = EXECUTEDATAFLAGBIT;
1673 break;
1674 default:
1675 DBC_ASSERT(false);
1676 break;
1677 }
c8c1ad8c 1678 if (mem_sect == DBLL_CODE)
7d55524d
ORL
1679 mem_phase_bit++;
1680
1681 if (mem_phase_bit < MAXFLAGS)
1682 segid = hnode->seg_id[mem_phase_bit];
1683
1684 /* Determine if there is a memory loading requirement */
1685 if ((hnode->code_data_flag_mask >> mem_phase_bit) & 0x1)
1686 mem_load_req = true;
1687
1688 }
c8c1ad8c 1689 mem_sect_type = (mem_sect == DBLL_CODE) ? DYNM_CODE : DYNM_DATA;
7d55524d 1690
c8c1ad8c 1691 /* Find an appropriate segment based on mem_sect */
7d55524d
ORL
1692 if (segid == NULLID) {
1693 /* No memory requirements of preferences */
1694 DBC_ASSERT(!mem_load_req);
1695 goto func_cont;
1696 }
1697 if (segid <= MAXSEGID) {
1698 DBC_ASSERT(segid < nldr_obj->dload_segs);
1699 /* Attempt to allocate from segid first. */
1700 rmm_addr_obj->segid = segid;
1701 status =
b301c858 1702 rmm_alloc(rmm, segid, word_size, align, dsp_address, false);
7d55524d
ORL
1703 if (DSP_FAILED(status)) {
1704 dev_dbg(bridge, "%s: Unable allocate from segment %d\n",
1705 __func__, segid);
1706 }
1707 } else {
1708 /* segid > MAXSEGID ==> Internal or external memory */
1709 DBC_ASSERT(segid == MEMINTERNALID || segid == MEMEXTERNALID);
1710 /* Check for any internal or external memory segment,
1711 * depending on segid. */
1712 mem_sect_type |= segid == MEMINTERNALID ?
1713 DYNM_INTERNAL : DYNM_EXTERNAL;
1714 for (i = 0; i < nldr_obj->dload_segs; i++) {
1715 if ((nldr_obj->seg_table[i] & mem_sect_type) !=
1716 mem_sect_type)
1717 continue;
1718
b301c858
RS
1719 status = rmm_alloc(rmm, i, word_size, align,
1720 dsp_address, false);
7d55524d
ORL
1721 if (DSP_SUCCEEDED(status)) {
1722 /* Save segid for freeing later */
1723 rmm_addr_obj->segid = i;
1724 break;
1725 }
1726 }
1727 }
1728func_cont:
1729 /* Haven't found memory yet, attempt to find any segment that works */
1730 if (status == -ENOMEM && !mem_load_req) {
1731 dev_dbg(bridge, "%s: Preferred segment unavailable, trying "
1732 "another\n", __func__);
1733 for (i = 0; i < nldr_obj->dload_segs; i++) {
1734 /* All bits of mem_sect_type must be set */
1735 if ((nldr_obj->seg_table[i] & mem_sect_type) !=
1736 mem_sect_type)
1737 continue;
1738
b301c858
RS
1739 status = rmm_alloc(rmm, i, word_size, align,
1740 dsp_address, false);
7d55524d
ORL
1741 if (DSP_SUCCEEDED(status)) {
1742 /* Save segid */
1743 rmm_addr_obj->segid = i;
1744 break;
1745 }
1746 }
1747 }
1748
1749 return status;
1750}
1751
13b18c29 1752static int remote_free(void **ref, u16 space, u32 dsp_address,
7d55524d
ORL
1753 u32 size, bool reserve)
1754{
13b18c29 1755 struct nldr_object *nldr_obj = (struct nldr_object *)ref;
7d55524d
ORL
1756 struct rmm_target_obj *rmm;
1757 u32 word_size;
1758 int status = -ENOMEM; /* Set to fail */
1759
1760 DBC_REQUIRE(nldr_obj);
1761
1762 rmm = nldr_obj->rmm;
1763
1764 /* Convert size to DSP words */
1765 word_size =
1766 (size + nldr_obj->us_dsp_word_size -
1767 1) / nldr_obj->us_dsp_word_size;
1768
b301c858 1769 if (rmm_free(rmm, space, dsp_address, word_size, reserve))
7d55524d
ORL
1770 status = 0;
1771
1772 return status;
1773}
1774
1775/*
1776 * ======== unload_lib ========
1777 */
1778static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
1779 struct lib_node *root)
1780{
1781 struct dbll_attrs new_attrs;
1782 struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1783 u16 i;
1784
1785 DBC_ASSERT(root != NULL);
1786
1787 /* Unload dependent libraries */
1788 for (i = 0; i < root->dep_libs; i++)
1789 unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);
1790
1791 root->dep_libs = 0;
1792
1793 new_attrs = nldr_obj->ldr_attrs;
1794 new_attrs.rmm_handle = nldr_obj->rmm;
1795 new_attrs.input_params = nldr_node_obj->priv_ref;
1796 new_attrs.base_image = false;
1797 new_attrs.sym_arg = root;
1798
1799 if (root->lib) {
1800 /* Unload the root library */
1801 nldr_obj->ldr_fxns.unload_fxn(root->lib, &new_attrs);
1802 nldr_obj->ldr_fxns.close_fxn(root->lib);
1803 }
1804
1805 /* Free dependent library list */
1806 kfree(root->dep_libs_tree);
1807 root->dep_libs_tree = NULL;
1808}
1809
1810/*
1811 * ======== unload_ovly ========
1812 */
1813static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
1814 enum nldr_phase phase)
1815{
1816 struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1817 struct ovly_node *po_node = NULL;
1818 struct ovly_sect *phase_sects = NULL;
1819 struct ovly_sect *other_sects_list = NULL;
1820 u16 i;
1821 u16 alloc_num = 0;
1822 u16 other_alloc = 0;
1823 u16 *ref_count = NULL;
1824 u16 *other_ref = NULL;
1825
1826 /* Find the node in the table */
1827 for (i = 0; i < nldr_obj->ovly_nodes; i++) {
1828 if (IS_EQUAL_UUID
1829 (nldr_node_obj->uuid, nldr_obj->ovly_table[i].uuid)) {
1830 /* Found it */
1831 po_node = &(nldr_obj->ovly_table[i]);
1832 break;
1833 }
1834 }
1835
1836 DBC_ASSERT(i < nldr_obj->ovly_nodes);
1837
1838 if (!po_node)
1839 /* TODO: Should we print warning here? */
1840 return;
1841
1842 switch (phase) {
1843 case NLDR_CREATE:
1844 ref_count = &(po_node->create_ref);
1845 phase_sects = po_node->create_sects_list;
1846 alloc_num = po_node->create_sects;
1847 break;
1848 case NLDR_EXECUTE:
1849 ref_count = &(po_node->execute_ref);
1850 phase_sects = po_node->execute_sects_list;
1851 alloc_num = po_node->execute_sects;
1852 break;
1853 case NLDR_DELETE:
1854 ref_count = &(po_node->delete_ref);
1855 other_ref = &(po_node->other_ref);
1856 phase_sects = po_node->delete_sects_list;
1857 /* 'Other' overlay sections are unloaded in the delete phase */
1858 other_sects_list = po_node->other_sects_list;
1859 alloc_num = po_node->delete_sects;
1860 other_alloc = po_node->other_sects;
1861 break;
1862 default:
1863 DBC_ASSERT(false);
1864 break;
1865 }
1866 DBC_ASSERT(ref_count && (*ref_count > 0));
1867 if (ref_count && (*ref_count > 0)) {
1868 *ref_count -= 1;
1869 if (other_ref) {
1870 DBC_ASSERT(*other_ref > 0);
1871 *other_ref -= 1;
1872 }
1873 }
1874
1875 if (ref_count && *ref_count == 0) {
1876 /* 'Deallocate' memory */
1877 free_sects(nldr_obj, phase_sects, alloc_num);
1878 }
1879 if (other_ref && *other_ref == 0)
1880 free_sects(nldr_obj, other_sects_list, other_alloc);
1881}
1882
1883/*
1884 * ======== find_in_persistent_lib_array ========
1885 */
1886static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
1887 struct dbll_library_obj *lib)
1888{
1889 s32 i = 0;
1890
1891 for (i = 0; i < nldr_node_obj->pers_libs; i++) {
1892 if (lib == nldr_node_obj->pers_lib_table[i].lib)
1893 return true;
1894
1895 }
1896
1897 return false;
1898}
1899
1900/*
1901 * ================ Find LCM (Least Common Multiplier ===
1902 */
1903static u32 find_lcm(u32 a, u32 b)
1904{
1905 u32 ret;
1906
1907 ret = a * b / find_gcf(a, b);
1908
1909 return ret;
1910}
1911
1912/*
1913 * ================ Find GCF (Greatest Common Factor ) ===
1914 */
1915static u32 find_gcf(u32 a, u32 b)
1916{
1917 u32 c;
1918
1919 /* Get the GCF (Greatest common factor between the numbers,
1920 * using Euclidian Algo */
1921 while ((c = (a % b))) {
1922 a = b;
1923 b = c;
1924 }
1925 return b;
1926}
1927
4f551c8f 1928#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
7d55524d
ORL
1929/**
1930 * nldr_find_addr() - Find the closest symbol to the given address based on
1931 * dynamic node object.
1932 *
1933 * @nldr_node: Dynamic node object
1934 * @sym_addr: Given address to find the dsp symbol
1935 * @offset_range: offset range to look for dsp symbol
1936 * @offset_output: Symbol Output address
1937 * @sym_name: String with the dsp symbol
1938 *
1939 * This function finds the node library for a given address and
1940 * retrieves the dsp symbol by calling dbll_find_dsp_symbol.
1941 */
int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
		   u32 offset_range, void *offset_output, char *sym_name)
{
	int status = 0;
	bool status1 = false;	/* true once the symbol has been located */
	s32 i = 0;
	struct lib_node root = { NULL, 0, NULL };
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(offset_output != NULL);
	DBC_REQUIRE(sym_name != NULL);
	pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, (u32) nldr_node,
		 sym_addr, offset_range, (u32) offset_output, sym_name);

	/* Pick the library tree for the node's current phase (split
	 * dynamic nodes keep one tree per phase) */
	if (nldr_node->dynamic && *nldr_node->pf_phase_split) {
		switch (nldr_node->phase) {
		case NLDR_CREATE:
			root = nldr_node->create_lib;
			break;
		case NLDR_EXECUTE:
			root = nldr_node->execute_lib;
			break;
		case NLDR_DELETE:
			root = nldr_node->delete_lib;
			break;
		default:
			DBC_ASSERT(false);
			break;
		}
	} else {
		/* for Overlay nodes or non-split Dynamic nodes */
		root = nldr_node->root;
	}

	/* Search the root library first */
	status1 = dbll_find_dsp_symbol(root.lib, sym_addr,
				       offset_range, offset_output, sym_name);

	/* If symbol not found, check dependent libraries */
	if (!status1)
		for (i = 0; i < root.dep_libs; i++) {
			status1 = dbll_find_dsp_symbol(
				root.dep_libs_tree[i].lib, sym_addr,
				offset_range, offset_output, sym_name);
			if (status1)
				/* Symbol found */
				break;
		}
	/* Check persistent libraries */
	if (!status1)
		for (i = 0; i < nldr_node->pers_libs; i++) {
			status1 = dbll_find_dsp_symbol(
				nldr_node->pers_lib_table[i].lib, sym_addr,
				offset_range, offset_output, sym_name);
			if (status1)
				/* Symbol found */
				break;
		}

	if (!status1) {
		pr_debug("%s: Address 0x%x not found in range %d.\n",
			 __func__, sym_addr, offset_range);
		status = -ESPIPE;
	}

	return status;
}
4f551c8f 2007#endif