Commit | Line | Data |
---|---|---|
7d55524d ORL |
1 | /* |
2 | * proc.c | |
3 | * | |
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | |
5 | * | |
6 | * Processor interface at the driver level. | |
7 | * | |
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | |
9 | * | |
10 | * This package is free software; you can redistribute it and/or modify | |
11 | * it under the terms of the GNU General Public License version 2 as | |
12 | * published by the Free Software Foundation. | |
13 | * | |
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | |
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | |
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | |
17 | */ | |
18 | ||
2094f12d | 19 | #include <linux/types.h> |
7d55524d ORL |
20 | /* ------------------------------------ Host OS */ |
21 | #include <linux/dma-mapping.h> | |
22 | #include <linux/scatterlist.h> | |
23 | #include <dspbridge/host_os.h> | |
24 | ||
25 | /* ----------------------------------- DSP/BIOS Bridge */ | |
7d55524d ORL |
26 | #include <dspbridge/dbdefs.h> |
27 | ||
28 | /* ----------------------------------- Trace & Debug */ | |
29 | #include <dspbridge/dbc.h> | |
30 | ||
31 | /* ----------------------------------- OS Adaptation Layer */ | |
32 | #include <dspbridge/cfg.h> | |
33 | #include <dspbridge/list.h> | |
34 | #include <dspbridge/ntfy.h> | |
35 | #include <dspbridge/sync.h> | |
36 | /* ----------------------------------- Bridge Driver */ | |
37 | #include <dspbridge/dspdefs.h> | |
38 | #include <dspbridge/dspdeh.h> | |
39 | /* ----------------------------------- Platform Manager */ | |
40 | #include <dspbridge/cod.h> | |
41 | #include <dspbridge/dev.h> | |
42 | #include <dspbridge/procpriv.h> | |
43 | #include <dspbridge/dmm.h> | |
44 | ||
45 | /* ----------------------------------- Resource Manager */ | |
46 | #include <dspbridge/mgr.h> | |
47 | #include <dspbridge/node.h> | |
48 | #include <dspbridge/nldr.h> | |
49 | #include <dspbridge/rmm.h> | |
50 | ||
51 | /* ----------------------------------- Others */ | |
52 | #include <dspbridge/dbdcd.h> | |
53 | #include <dspbridge/msg.h> | |
54 | #include <dspbridge/dspioctl.h> | |
55 | #include <dspbridge/drv.h> | |
56 | ||
57 | /* ----------------------------------- This */ | |
58 | #include <dspbridge/proc.h> | |
59 | #include <dspbridge/pwr.h> | |
60 | ||
61 | #include <dspbridge/resourcecleanup.h> | |
62 | /* ----------------------------------- Defines, Data Structures, Typedefs */ | |
63 | #define MAXCMDLINELEN 255 | |
64 | #define PROC_ENVPROCID "PROC_ID=%d" | |
65 | #define MAXPROCIDLEN (8 + 5) | |
66 | #define PROC_DFLT_TIMEOUT 10000 /* Time out in milliseconds */ | |
67 | #define PWR_TIMEOUT 500 /* Sleep/wake timeout in msec */ | |
68 | #define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */ | |
69 | ||
70 | #define DSP_CACHE_LINE 128 | |
71 | ||
72 | #define BUFMODE_MASK (3 << 14) | |
73 | ||
74 | /* Buffer modes from DSP perspective */ | |
75 | #define RBUF 0x4000 /* Input buffer */ | |
76 | #define WBUF 0x8000 /* Output Buffer */ | |
77 | ||
78 | extern struct device *bridge; | |
79 | ||
80 | /* ----------------------------------- Globals */ | |
81 | ||
82 | /* The proc_object structure. */ | |
83 | struct proc_object { | |
84 | struct list_head link; /* Link to next proc_object */ | |
85 | struct dev_object *hdev_obj; /* Device this PROC represents */ | |
86 | u32 process; /* Process owning this Processor */ | |
87 | struct mgr_object *hmgr_obj; /* Manager Object Handle */ | |
88 | u32 attach_count; /* Processor attach count */ | |
89 | u32 processor_id; /* Processor number */ | |
90 | u32 utimeout; /* Time out count */ | |
91 | enum dsp_procstate proc_state; /* Processor state */ | |
92 | u32 ul_unit; /* DDSP unit number */ | |
93 | bool is_already_attached; /* | |
94 | * True if the Device below has | |
95 | * GPP Client attached | |
96 | */ | |
97 | struct ntfy_object *ntfy_obj; /* Manages notifications */ | |
98 | /* Bridge Context Handle */ | |
99 | struct bridge_dev_context *hbridge_context; | |
100 | /* Function interface to Bridge driver */ | |
101 | struct bridge_drv_interface *intf_fxns; | |
102 | char *psz_last_coff; | |
103 | struct list_head proc_list; | |
104 | }; | |
105 | ||
106 | static u32 refs; | |
107 | ||
108 | DEFINE_MUTEX(proc_lock); /* For critical sections */ | |
109 | ||
110 | /* ----------------------------------- Function Prototypes */ | |
c8c1ad8c | 111 | static int proc_monitor(struct proc_object *proc_obj); |
7d55524d ORL |
112 | static s32 get_envp_count(char **envp); |
113 | static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems, | |
0cd343a4 | 114 | s32 cnew_envp, char *sz_var); |
7d55524d ORL |
115 | |
116 | /* remember mapping information */ | |
117 | static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt, | |
118 | u32 mpu_addr, u32 dsp_addr, u32 size) | |
119 | { | |
120 | struct dmm_map_object *map_obj; | |
121 | ||
122 | u32 num_usr_pgs = size / PG_SIZE4K; | |
123 | ||
124 | pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n", | |
125 | __func__, mpu_addr, | |
126 | dsp_addr, size); | |
127 | ||
128 | map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL); | |
129 | if (!map_obj) { | |
130 | pr_err("%s: kzalloc failed\n", __func__); | |
131 | return NULL; | |
132 | } | |
133 | INIT_LIST_HEAD(&map_obj->link); | |
134 | ||
135 | map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *), | |
136 | GFP_KERNEL); | |
137 | if (!map_obj->pages) { | |
138 | pr_err("%s: kzalloc failed\n", __func__); | |
139 | kfree(map_obj); | |
140 | return NULL; | |
141 | } | |
142 | ||
143 | map_obj->mpu_addr = mpu_addr; | |
144 | map_obj->dsp_addr = dsp_addr; | |
145 | map_obj->size = size; | |
146 | map_obj->num_usr_pgs = num_usr_pgs; | |
147 | ||
148 | spin_lock(&pr_ctxt->dmm_map_lock); | |
149 | list_add(&map_obj->link, &pr_ctxt->dmm_map_list); | |
150 | spin_unlock(&pr_ctxt->dmm_map_lock); | |
151 | ||
152 | return map_obj; | |
153 | } | |
154 | ||
155 | static int match_exact_map_obj(struct dmm_map_object *map_obj, | |
156 | u32 dsp_addr, u32 size) | |
157 | { | |
158 | if (map_obj->dsp_addr == dsp_addr && map_obj->size != size) | |
159 | pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n", | |
160 | __func__, dsp_addr, map_obj->size, size); | |
161 | ||
162 | return map_obj->dsp_addr == dsp_addr && | |
163 | map_obj->size == size; | |
164 | } | |
165 | ||
166 | static void remove_mapping_information(struct process_context *pr_ctxt, | |
167 | u32 dsp_addr, u32 size) | |
168 | { | |
169 | struct dmm_map_object *map_obj; | |
170 | ||
171 | pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__, | |
172 | dsp_addr, size); | |
173 | ||
174 | spin_lock(&pr_ctxt->dmm_map_lock); | |
175 | list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { | |
176 | pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n", | |
177 | __func__, | |
178 | map_obj->mpu_addr, | |
179 | map_obj->dsp_addr, | |
180 | map_obj->size); | |
181 | ||
182 | if (match_exact_map_obj(map_obj, dsp_addr, size)) { | |
183 | pr_debug("%s: match, deleting map info\n", __func__); | |
184 | list_del(&map_obj->link); | |
185 | kfree(map_obj->dma_info.sg); | |
186 | kfree(map_obj->pages); | |
187 | kfree(map_obj); | |
188 | goto out; | |
189 | } | |
190 | pr_debug("%s: candidate didn't match\n", __func__); | |
191 | } | |
192 | ||
193 | pr_err("%s: failed to find given map info\n", __func__); | |
194 | out: | |
195 | spin_unlock(&pr_ctxt->dmm_map_lock); | |
196 | } | |
197 | ||
198 | static int match_containing_map_obj(struct dmm_map_object *map_obj, | |
199 | u32 mpu_addr, u32 size) | |
200 | { | |
201 | u32 map_obj_end = map_obj->mpu_addr + map_obj->size; | |
202 | ||
203 | return mpu_addr >= map_obj->mpu_addr && | |
204 | mpu_addr + size <= map_obj_end; | |
205 | } | |
206 | ||
207 | static struct dmm_map_object *find_containing_mapping( | |
208 | struct process_context *pr_ctxt, | |
209 | u32 mpu_addr, u32 size) | |
210 | { | |
211 | struct dmm_map_object *map_obj; | |
212 | pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__, | |
213 | mpu_addr, size); | |
214 | ||
215 | spin_lock(&pr_ctxt->dmm_map_lock); | |
216 | list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { | |
217 | pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n", | |
218 | __func__, | |
219 | map_obj->mpu_addr, | |
220 | map_obj->dsp_addr, | |
221 | map_obj->size); | |
222 | if (match_containing_map_obj(map_obj, mpu_addr, size)) { | |
223 | pr_debug("%s: match!\n", __func__); | |
224 | goto out; | |
225 | } | |
226 | ||
227 | pr_debug("%s: no match!\n", __func__); | |
228 | } | |
229 | ||
230 | map_obj = NULL; | |
231 | out: | |
232 | spin_unlock(&pr_ctxt->dmm_map_lock); | |
233 | return map_obj; | |
234 | } | |
235 | ||
236 | static int find_first_page_in_cache(struct dmm_map_object *map_obj, | |
237 | unsigned long mpu_addr) | |
238 | { | |
239 | u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT; | |
240 | u32 requested_base_page = mpu_addr >> PAGE_SHIFT; | |
241 | int pg_index = requested_base_page - mapped_base_page; | |
242 | ||
243 | if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) { | |
244 | pr_err("%s: failed (got %d)\n", __func__, pg_index); | |
245 | return -1; | |
246 | } | |
247 | ||
248 | pr_debug("%s: first page is %d\n", __func__, pg_index); | |
249 | return pg_index; | |
250 | } | |
251 | ||
252 | static inline struct page *get_mapping_page(struct dmm_map_object *map_obj, | |
253 | int pg_i) | |
254 | { | |
255 | pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__, | |
256 | pg_i, map_obj->num_usr_pgs); | |
257 | ||
258 | if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) { | |
259 | pr_err("%s: requested pg_i %d is out of mapped range\n", | |
260 | __func__, pg_i); | |
261 | return NULL; | |
262 | } | |
263 | ||
264 | return map_obj->pages[pg_i]; | |
265 | } | |
266 | ||
/*
 *  ======== proc_attach ========
 *  Purpose:
 *      Prepare for communication with a particular DSP processor, and return
 *      a handle to the processor object.
 *  Parameters:
 *      processor_id:   index of the DSP device to attach to.
 *      attr_in:        optional attach attributes; only utimeout is read here.
 *      ph_processor:   out: receives the new (or existing) processor handle.
 *      pr_ctxt:        per-process context; caches the handle in hprocessor.
 *  Returns:
 *      0 on success (including when the context is already attached),
 *      -ENOMEM on allocation failure, or the first failing sub-call's status.
 */
int
proc_attach(u32 processor_id,
	    const struct dsp_processorattrin *attr_in,
	    void **ph_processor, struct process_context *pr_ctxt)
{
	int status = 0;
	struct dev_object *hdev_obj;
	struct proc_object *p_proc_object = NULL;
	struct mgr_object *hmgr_obj = NULL;
	struct drv_object *hdrv_obj = NULL;
	u8 dev_type;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_processor != NULL);

	/* Each process context attaches at most once; hand back the
	 * cached handle on repeat calls. */
	if (pr_ctxt->hprocessor) {
		*ph_processor = pr_ctxt->hprocessor;
		return status;
	}

	/* Get the Driver and Manager Object Handles */
	status = cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT);
	if (DSP_SUCCEEDED(status))
		status = cfg_get_object((u32 *) &hmgr_obj, REG_MGR_OBJECT);

	if (DSP_SUCCEEDED(status)) {
		/* Get the Device Object */
		status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
	}
	if (DSP_SUCCEEDED(status))
		status = dev_get_dev_type(hdev_obj, &dev_type);

	if (DSP_FAILED(status))
		goto func_end;

	/* If we made it this far, create the Processor object: */
	p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
	/* Fill out the Processor Object: */
	if (p_proc_object == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	p_proc_object->hdev_obj = hdev_obj;
	p_proc_object->hmgr_obj = hmgr_obj;
	p_proc_object->processor_id = dev_type;
	/* Store TGID instead of process handle */
	p_proc_object->process = current->tgid;

	INIT_LIST_HEAD(&p_proc_object->proc_list);

	if (attr_in)
		p_proc_object->utimeout = attr_in->utimeout;
	else
		p_proc_object->utimeout = PROC_DFLT_TIMEOUT;

	status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
	if (DSP_SUCCEEDED(status)) {
		status = dev_get_bridge_context(hdev_obj,
					     &p_proc_object->hbridge_context);
		if (DSP_FAILED(status))
			kfree(p_proc_object);
	} else
		kfree(p_proc_object);

	/* NOTE(review): p_proc_object is freed above but not reset to NULL,
	 * so the DBC_ENSURE below evaluates a dangling (non-NULL) pointer
	 * value on these failure paths. */
	if (DSP_FAILED(status))
		goto func_end;

	/* Create the Notification Object */
	/* This is created with no event mask, no notify mask
	 * and no valid handle to the notification. They all get
	 * filled up when proc_register_notify is called */
	p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
	if (p_proc_object->ntfy_obj)
		ntfy_init(p_proc_object->ntfy_obj);
	else
		status = -ENOMEM;

	if (DSP_SUCCEEDED(status)) {
		/* Insert the Processor Object into the DEV List.
		 * Return handle to this Processor Object:
		 * Find out if the Device is already attached to a
		 * Processor. If so, return AlreadyAttached status */
		lst_init_elem(&p_proc_object->link);
		status = dev_insert_proc_object(p_proc_object->hdev_obj,
						(u32) p_proc_object,
						&p_proc_object->
						is_already_attached);
		if (DSP_SUCCEEDED(status)) {
			if (p_proc_object->is_already_attached)
				status = 0;
		} else {
			/* insertion failed: tear down notification object
			 * and the processor object itself */
			if (p_proc_object->ntfy_obj) {
				ntfy_delete(p_proc_object->ntfy_obj);
				kfree(p_proc_object->ntfy_obj);
			}

			kfree(p_proc_object);
		}
		if (DSP_SUCCEEDED(status)) {
			*ph_processor = (void *)p_proc_object;
			pr_ctxt->hprocessor = *ph_processor;
			/* best-effort notification; result intentionally
			 * ignored */
			(void)proc_notify_clients(p_proc_object,
						  DSP_PROCESSORATTACH);
		}
	} else {
		/* Don't leak memory if DSP_FAILED */
		kfree(p_proc_object);
	}
func_end:
	DBC_ENSURE((status == -EPERM && *ph_processor == NULL) ||
		   (DSP_SUCCEEDED(status) && p_proc_object) ||
		   (status == 0 && p_proc_object));

	return status;
}
389 | ||
390 | static int get_exec_file(struct cfg_devnode *dev_node_obj, | |
391 | struct dev_object *hdev_obj, | |
b301c858 | 392 | u32 size, char *exec_file) |
7d55524d ORL |
393 | { |
394 | u8 dev_type; | |
395 | s32 len; | |
396 | ||
397 | dev_get_dev_type(hdev_obj, (u8 *) &dev_type); | |
398 | if (dev_type == DSP_UNIT) { | |
b301c858 | 399 | return cfg_get_exec_file(dev_node_obj, size, exec_file); |
7d55524d ORL |
400 | } else if (dev_type == IVA_UNIT) { |
401 | if (iva_img) { | |
402 | len = strlen(iva_img); | |
b301c858 | 403 | strncpy(exec_file, iva_img, len + 1); |
7d55524d ORL |
404 | return 0; |
405 | } | |
406 | } | |
407 | return -ENOENT; | |
408 | } | |
409 | ||
/*
 *  ======== proc_auto_start ======== =
 *  Purpose:
 *      A Particular device gets loaded with the default image
 *      if the AutoStart flag is set.
 *  Parameters:
 *      dev_node_obj:   device node used to look up the default executable.
 *      hdev_obj:       Handle to the Device
 *  Returns:
 *      0:      On Successful Loading
 *      -EPERM  General Failure
 *  Requires:
 *      hdev_obj != NULL
 *  Ensures:
 */
int proc_auto_start(struct cfg_devnode *dev_node_obj,
		    struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct proc_object *p_proc_object;
	char sz_exec_file[MAXCMDLINELEN];
	char *argv[2];
	struct mgr_object *hmgr_obj = NULL;
	u8 dev_type;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(dev_node_obj != NULL);
	DBC_REQUIRE(hdev_obj != NULL);

	/* Create a Dummy PROC Object: a throwaway proc_object used only to
	 * drive stop/load/start; it is freed again before returning. */
	status = cfg_get_object((u32 *) &hmgr_obj, REG_MGR_OBJECT);
	if (DSP_FAILED(status))
		goto func_end;

	p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
	if (p_proc_object == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	p_proc_object->hdev_obj = hdev_obj;
	p_proc_object->hmgr_obj = hmgr_obj;
	status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
	if (DSP_SUCCEEDED(status))
		status = dev_get_bridge_context(hdev_obj,
					     &p_proc_object->hbridge_context);
	if (DSP_FAILED(status))
		goto func_cont;

	/* Stop the Device, put it into standby mode */
	status = proc_stop(p_proc_object);

	if (DSP_FAILED(status))
		goto func_cont;

	/* Get the default executable for this board... */
	dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
	p_proc_object->processor_id = dev_type;
	status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file),
			       sz_exec_file);
	if (DSP_SUCCEEDED(status)) {
		argv[0] = sz_exec_file;
		argv[1] = NULL;
		/* ...and try to load it: */
		status = proc_load(p_proc_object, 1, (const char **)argv, NULL);
		if (DSP_SUCCEEDED(status))
			status = proc_start(p_proc_object);
	}
	/* release any COFF path cached by proc_load before freeing the
	 * temporary object */
	kfree(p_proc_object->psz_last_coff);
	p_proc_object->psz_last_coff = NULL;
func_cont:
	kfree(p_proc_object);
func_end:
	return status;
}
483 | ||
484 | /* | |
485 | * ======== proc_ctrl ======== | |
486 | * Purpose: | |
487 | * Pass control information to the GPP device driver managing the | |
488 | * DSP processor. | |
489 | * | |
490 | * This will be an OEM-only function, and not part of the DSP/BIOS Bridge | |
491 | * application developer's API. | |
492 | * Call the bridge_dev_ctrl fxn with the Argument. This is a Synchronous | |
493 | * Operation. arg can be null. | |
494 | */ | |
9d7d0a52 | 495 | int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata * arg) |
7d55524d ORL |
496 | { |
497 | int status = 0; | |
498 | struct proc_object *p_proc_object = hprocessor; | |
499 | u32 timeout = 0; | |
500 | ||
501 | DBC_REQUIRE(refs > 0); | |
502 | ||
503 | if (p_proc_object) { | |
504 | /* intercept PWR deep sleep command */ | |
505 | if (dw_cmd == BRDIOCTL_DEEPSLEEP) { | |
506 | timeout = arg->cb_data; | |
507 | status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout); | |
508 | } | |
509 | /* intercept PWR emergency sleep command */ | |
510 | else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) { | |
511 | timeout = arg->cb_data; | |
512 | status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP, timeout); | |
513 | } else if (dw_cmd == PWR_DEEPSLEEP) { | |
514 | /* timeout = arg->cb_data; */ | |
515 | status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout); | |
516 | } | |
517 | /* intercept PWR wake commands */ | |
518 | else if (dw_cmd == BRDIOCTL_WAKEUP) { | |
519 | timeout = arg->cb_data; | |
520 | status = pwr_wake_dsp(timeout); | |
521 | } else if (dw_cmd == PWR_WAKEUP) { | |
522 | /* timeout = arg->cb_data; */ | |
523 | status = pwr_wake_dsp(timeout); | |
524 | } else | |
525 | if (DSP_SUCCEEDED((*p_proc_object->intf_fxns->pfn_dev_cntrl) | |
526 | (p_proc_object->hbridge_context, dw_cmd, | |
527 | arg))) { | |
528 | status = 0; | |
529 | } else { | |
530 | status = -EPERM; | |
531 | } | |
532 | } else { | |
533 | status = -EFAULT; | |
534 | } | |
535 | ||
536 | return status; | |
537 | } | |
538 | ||
539 | /* | |
540 | * ======== proc_detach ======== | |
541 | * Purpose: | |
542 | * Destroys the Processor Object. Removes the notification from the Dev | |
543 | * List. | |
544 | */ | |
545 | int proc_detach(struct process_context *pr_ctxt) | |
546 | { | |
547 | int status = 0; | |
548 | struct proc_object *p_proc_object = NULL; | |
549 | ||
550 | DBC_REQUIRE(refs > 0); | |
551 | ||
552 | p_proc_object = (struct proc_object *)pr_ctxt->hprocessor; | |
553 | ||
554 | if (p_proc_object) { | |
555 | /* Notify the Client */ | |
556 | ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH); | |
557 | /* Remove the notification memory */ | |
558 | if (p_proc_object->ntfy_obj) { | |
559 | ntfy_delete(p_proc_object->ntfy_obj); | |
560 | kfree(p_proc_object->ntfy_obj); | |
561 | } | |
562 | ||
563 | kfree(p_proc_object->psz_last_coff); | |
564 | p_proc_object->psz_last_coff = NULL; | |
565 | /* Remove the Proc from the DEV List */ | |
566 | (void)dev_remove_proc_object(p_proc_object->hdev_obj, | |
567 | (u32) p_proc_object); | |
568 | /* Free the Processor Object */ | |
569 | kfree(p_proc_object); | |
570 | pr_ctxt->hprocessor = NULL; | |
571 | } else { | |
572 | status = -EFAULT; | |
573 | } | |
574 | ||
575 | return status; | |
576 | } | |
577 | ||
578 | /* | |
579 | * ======== proc_enum_nodes ======== | |
580 | * Purpose: | |
581 | * Enumerate and get configuration information about nodes allocated | |
582 | * on a DSP processor. | |
583 | */ | |
584 | int proc_enum_nodes(void *hprocessor, void **node_tab, | |
e6bf74f0 MN |
585 | u32 node_tab_size, u32 *pu_num_nodes, |
586 | u32 *pu_allocated) | |
7d55524d ORL |
587 | { |
588 | int status = -EPERM; | |
589 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | |
590 | struct node_mgr *hnode_mgr = NULL; | |
591 | ||
592 | DBC_REQUIRE(refs > 0); | |
593 | DBC_REQUIRE(node_tab != NULL || node_tab_size == 0); | |
594 | DBC_REQUIRE(pu_num_nodes != NULL); | |
595 | DBC_REQUIRE(pu_allocated != NULL); | |
596 | ||
597 | if (p_proc_object) { | |
598 | if (DSP_SUCCEEDED(dev_get_node_manager(p_proc_object->hdev_obj, | |
599 | &hnode_mgr))) { | |
600 | if (hnode_mgr) { | |
601 | status = node_enum_nodes(hnode_mgr, node_tab, | |
602 | node_tab_size, | |
603 | pu_num_nodes, | |
604 | pu_allocated); | |
605 | } | |
606 | } | |
607 | } else { | |
608 | status = -EFAULT; | |
609 | } | |
610 | ||
611 | return status; | |
612 | } | |
613 | ||
614 | /* Cache operation against kernel address instead of users */ | |
615 | static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start, | |
616 | ssize_t len, int pg_i) | |
617 | { | |
618 | struct page *page; | |
619 | unsigned long offset; | |
620 | ssize_t rest; | |
621 | int ret = 0, i = 0; | |
622 | struct scatterlist *sg = map_obj->dma_info.sg; | |
623 | ||
624 | while (len) { | |
625 | page = get_mapping_page(map_obj, pg_i); | |
626 | if (!page) { | |
627 | pr_err("%s: no page for %08lx\n", __func__, start); | |
628 | ret = -EINVAL; | |
629 | goto out; | |
630 | } else if (IS_ERR(page)) { | |
631 | pr_err("%s: err page for %08lx(%lu)\n", __func__, start, | |
632 | PTR_ERR(page)); | |
633 | ret = PTR_ERR(page); | |
634 | goto out; | |
635 | } | |
636 | ||
637 | offset = start & ~PAGE_MASK; | |
638 | rest = min_t(ssize_t, PAGE_SIZE - offset, len); | |
639 | ||
640 | sg_set_page(&sg[i], page, rest, offset); | |
641 | ||
642 | len -= rest; | |
643 | start += rest; | |
644 | pg_i++, i++; | |
645 | } | |
646 | ||
647 | if (i != map_obj->dma_info.num_pages) { | |
648 | pr_err("%s: bad number of sg iterations\n", __func__); | |
649 | ret = -EFAULT; | |
650 | goto out; | |
651 | } | |
652 | ||
653 | out: | |
654 | return ret; | |
655 | } | |
656 | ||
657 | static int memory_regain_ownership(struct dmm_map_object *map_obj, | |
658 | unsigned long start, ssize_t len, enum dma_data_direction dir) | |
659 | { | |
660 | int ret = 0; | |
661 | unsigned long first_data_page = start >> PAGE_SHIFT; | |
662 | unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT); | |
663 | /* calculating the number of pages this area spans */ | |
664 | unsigned long num_pages = last_data_page - first_data_page + 1; | |
665 | struct bridge_dma_map_info *dma_info = &map_obj->dma_info; | |
666 | ||
667 | if (!dma_info->sg) | |
668 | goto out; | |
669 | ||
670 | if (dma_info->dir != dir || dma_info->num_pages != num_pages) { | |
671 | pr_err("%s: dma info doesn't match given params\n", __func__); | |
672 | return -EINVAL; | |
673 | } | |
674 | ||
675 | dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir); | |
676 | ||
677 | pr_debug("%s: dma_map_sg unmapped\n", __func__); | |
678 | ||
679 | kfree(dma_info->sg); | |
680 | ||
681 | map_obj->dma_info.sg = NULL; | |
682 | ||
683 | out: | |
684 | return ret; | |
685 | } | |
686 | ||
687 | /* Cache operation against kernel address instead of users */ | |
688 | static int memory_give_ownership(struct dmm_map_object *map_obj, | |
689 | unsigned long start, ssize_t len, enum dma_data_direction dir) | |
690 | { | |
691 | int pg_i, ret, sg_num; | |
692 | struct scatterlist *sg; | |
693 | unsigned long first_data_page = start >> PAGE_SHIFT; | |
694 | unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT); | |
695 | /* calculating the number of pages this area spans */ | |
696 | unsigned long num_pages = last_data_page - first_data_page + 1; | |
697 | ||
698 | pg_i = find_first_page_in_cache(map_obj, start); | |
699 | if (pg_i < 0) { | |
700 | pr_err("%s: failed to find first page in cache\n", __func__); | |
701 | ret = -EINVAL; | |
702 | goto out; | |
703 | } | |
704 | ||
705 | sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL); | |
706 | if (!sg) { | |
707 | pr_err("%s: kcalloc failed\n", __func__); | |
708 | ret = -ENOMEM; | |
709 | goto out; | |
710 | } | |
711 | ||
712 | sg_init_table(sg, num_pages); | |
713 | ||
714 | /* cleanup a previous sg allocation */ | |
715 | /* this may happen if application doesn't signal for e/o DMA */ | |
716 | kfree(map_obj->dma_info.sg); | |
717 | ||
718 | map_obj->dma_info.sg = sg; | |
719 | map_obj->dma_info.dir = dir; | |
720 | map_obj->dma_info.num_pages = num_pages; | |
721 | ||
722 | ret = build_dma_sg(map_obj, start, len, pg_i); | |
723 | if (ret) | |
724 | goto kfree_sg; | |
725 | ||
726 | sg_num = dma_map_sg(bridge, sg, num_pages, dir); | |
727 | if (sg_num < 1) { | |
728 | pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num); | |
729 | ret = -EFAULT; | |
730 | goto kfree_sg; | |
731 | } | |
732 | ||
733 | pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num); | |
734 | map_obj->dma_info.sg_num = sg_num; | |
735 | ||
736 | return 0; | |
737 | ||
738 | kfree_sg: | |
739 | kfree(sg); | |
740 | map_obj->dma_info.sg = NULL; | |
741 | out: | |
742 | return ret; | |
743 | } | |
744 | ||
745 | int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, | |
746 | enum dma_data_direction dir) | |
747 | { | |
748 | /* Keep STATUS here for future additions to this function */ | |
749 | int status = 0; | |
750 | struct process_context *pr_ctxt = (struct process_context *) hprocessor; | |
751 | struct dmm_map_object *map_obj; | |
752 | ||
753 | DBC_REQUIRE(refs > 0); | |
754 | ||
755 | if (!pr_ctxt) { | |
756 | status = -EFAULT; | |
757 | goto err_out; | |
758 | } | |
759 | ||
760 | pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__, | |
761 | (u32)pmpu_addr, | |
762 | ul_size, dir); | |
763 | ||
764 | /* find requested memory are in cached mapping information */ | |
765 | map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size); | |
766 | if (!map_obj) { | |
767 | pr_err("%s: find_containing_mapping failed\n", __func__); | |
768 | status = -EFAULT; | |
769 | goto err_out; | |
770 | } | |
771 | ||
772 | if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) { | |
773 | pr_err("%s: InValid address parameters %p %x\n", | |
774 | __func__, pmpu_addr, ul_size); | |
775 | status = -EFAULT; | |
776 | } | |
777 | ||
778 | err_out: | |
779 | ||
780 | return status; | |
781 | } | |
782 | ||
783 | int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, | |
784 | enum dma_data_direction dir) | |
785 | { | |
786 | /* Keep STATUS here for future additions to this function */ | |
787 | int status = 0; | |
788 | struct process_context *pr_ctxt = (struct process_context *) hprocessor; | |
789 | struct dmm_map_object *map_obj; | |
790 | ||
791 | DBC_REQUIRE(refs > 0); | |
792 | ||
793 | if (!pr_ctxt) { | |
794 | status = -EFAULT; | |
795 | goto err_out; | |
796 | } | |
797 | ||
798 | pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__, | |
799 | (u32)pmpu_addr, | |
800 | ul_size, dir); | |
801 | ||
802 | /* find requested memory are in cached mapping information */ | |
803 | map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size); | |
804 | if (!map_obj) { | |
805 | pr_err("%s: find_containing_mapping failed\n", __func__); | |
806 | status = -EFAULT; | |
807 | goto err_out; | |
808 | } | |
809 | ||
810 | if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) { | |
811 | pr_err("%s: InValid address parameters %p %x\n", | |
812 | __func__, pmpu_addr, ul_size); | |
813 | status = -EFAULT; | |
814 | goto err_out; | |
815 | } | |
816 | ||
817 | err_out: | |
818 | return status; | |
819 | } | |
820 | ||
821 | /* | |
822 | * ======== proc_flush_memory ======== | |
823 | * Purpose: | |
824 | * Flush cache | |
825 | */ | |
826 | int proc_flush_memory(void *hprocessor, void *pmpu_addr, | |
827 | u32 ul_size, u32 ul_flags) | |
828 | { | |
829 | enum dma_data_direction dir = DMA_BIDIRECTIONAL; | |
830 | ||
831 | return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir); | |
832 | } | |
833 | ||
834 | /* | |
835 | * ======== proc_invalidate_memory ======== | |
836 | * Purpose: | |
837 | * Invalidates the memory specified | |
838 | */ | |
839 | int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size) | |
840 | { | |
841 | enum dma_data_direction dir = DMA_FROM_DEVICE; | |
842 | ||
843 | return proc_begin_dma(hprocessor, pmpu_addr, size, dir); | |
844 | } | |
845 | ||
/*
 *  ======== proc_get_resource_info ========
 *  Purpose:
 *      Enumerate the resources currently available on a processor.
 *  Parameters:
 *      hprocessor:         processor handle from proc_attach.
 *      resource_type:      DSP_RESOURCE_* selector (memory stats or load).
 *      resource_info:      out: filled with mem_stat or proc_load_stat.
 *      resource_info_size: must be >= sizeof(struct dsp_resourceinfo).
 *  Returns:
 *      0 on success; -EFAULT for bad handles; -EINVAL when rmm_stat fails;
 *      -EPERM for unsupported resource types.
 */
int proc_get_resource_info(void *hprocessor, u32 resource_type,
			   struct dsp_resourceinfo *resource_info,
			   u32 resource_info_size)
{
	int status = -EPERM;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct node_mgr *hnode_mgr = NULL;
	struct nldr_object *nldr_obj = NULL;
	struct rmm_target_obj *rmm = NULL;
	struct io_mgr *hio_mgr = NULL;	/* IO manager handle */

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(resource_info != NULL);
	DBC_REQUIRE(resource_info_size >= sizeof(struct dsp_resourceinfo));

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	switch (resource_type) {
	case DSP_RESOURCE_DYNDARAM:
	case DSP_RESOURCE_DYNSARAM:
	case DSP_RESOURCE_DYNEXTERNAL:
	case DSP_RESOURCE_DYNSRAM:
		/* memory statistics come from the remote memory manager,
		 * reached via node manager -> node loader */
		status = dev_get_node_manager(p_proc_object->hdev_obj,
					      &hnode_mgr);
		if (!hnode_mgr) {
			status = -EFAULT;
			goto func_end;
		}

		status = node_get_nldr_obj(hnode_mgr, &nldr_obj);
		if (DSP_SUCCEEDED(status)) {
			status = nldr_get_rmm_manager(nldr_obj, &rmm);
			if (rmm) {
				if (!rmm_stat(rmm,
					      (enum dsp_memtype)resource_type,
					      (struct dsp_memstat *)
					      &(resource_info->result.
						mem_stat)))
					status = -EINVAL;
			} else {
				status = -EFAULT;
			}
		}
		break;
	case DSP_RESOURCE_PROCLOAD:
		/* processor load statistics come from the IO manager */
		status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
		if (hio_mgr)
			status =
			    p_proc_object->intf_fxns->
			    pfn_io_get_proc_load(hio_mgr,
						 (struct dsp_procloadstat *)
						 &(resource_info->result.
						   proc_load_stat));
		else
			status = -EFAULT;
		break;
	default:
		status = -EPERM;
		break;
	}
func_end:
	return status;
}
916 | ||
917 | /* | |
918 | * ======== proc_exit ======== | |
919 | * Purpose: | |
920 | * Decrement reference count, and free resources when reference count is | |
921 | * 0. | |
922 | */ | |
923 | void proc_exit(void) | |
924 | { | |
925 | DBC_REQUIRE(refs > 0); | |
926 | ||
927 | refs--; | |
928 | ||
929 | DBC_ENSURE(refs >= 0); | |
930 | } | |
931 | ||
932 | /* | |
933 | * ======== proc_get_dev_object ======== | |
934 | * Purpose: | |
935 | * Return the Dev Object handle for a given Processor. | |
936 | * | |
937 | */ | |
938 | int proc_get_dev_object(void *hprocessor, | |
e436d07d | 939 | struct dev_object **device_obj) |
7d55524d ORL |
940 | { |
941 | int status = -EPERM; | |
942 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | |
943 | ||
944 | DBC_REQUIRE(refs > 0); | |
e436d07d | 945 | DBC_REQUIRE(device_obj != NULL); |
7d55524d ORL |
946 | |
947 | if (p_proc_object) { | |
e436d07d | 948 | *device_obj = p_proc_object->hdev_obj; |
7d55524d ORL |
949 | status = 0; |
950 | } else { | |
e436d07d | 951 | *device_obj = NULL; |
7d55524d ORL |
952 | status = -EFAULT; |
953 | } | |
954 | ||
e436d07d RS |
955 | DBC_ENSURE((DSP_SUCCEEDED(status) && *device_obj != NULL) || |
956 | (DSP_FAILED(status) && *device_obj == NULL)); | |
7d55524d ORL |
957 | |
958 | return status; | |
959 | } | |
960 | ||
/*
 *  ======== proc_get_state ========
 *  Purpose:
 *      Report the state of the specified DSP processor by translating the
 *      bridge (BRD) state into the PROC_* state exposed to clients.
 */
int proc_get_state(void *hprocessor,
		   struct dsp_processorstate *proc_state_obj,
		   u32 state_info_size)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	int brd_status;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(proc_state_obj != NULL);
	DBC_REQUIRE(state_info_size >= sizeof(struct dsp_processorstate));

	if (p_proc_object) {
		/* First, retrieve BRD state information */
		status = (*p_proc_object->intf_fxns->pfn_brd_status)
		    (p_proc_object->hbridge_context, &brd_status);
		if (DSP_SUCCEEDED(status)) {
			switch (brd_status) {
			case BRD_STOPPED:
				proc_state_obj->proc_state = PROC_STOPPED;
				break;
			case BRD_SLEEP_TRANSITION:
			case BRD_DSP_HIBERNATION:
				/* Fall through - power-transition states are
				 * reported to clients as "running". */
			case BRD_RUNNING:
				proc_state_obj->proc_state = PROC_RUNNING;
				break;
			case BRD_LOADED:
				proc_state_obj->proc_state = PROC_LOADED;
				break;
			case BRD_ERROR:
				proc_state_obj->proc_state = PROC_ERROR;
				break;
			default:
				/* Unknown BRD state: poison the value. */
				proc_state_obj->proc_state = 0xFF;
				status = -EPERM;
				break;
			}
		}
	} else {
		status = -EFAULT;
	}
	/* NOTE(review): on the failure paths above proc_state_obj->proc_state
	 * is never written, so this trace may print an uninitialized value
	 * supplied by the caller - confirm callers zero the struct. */
	dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n",
		__func__, status, proc_state_obj->proc_state);
	return status;
}
1012 | ||
1013 | /* | |
1014 | * ======== proc_get_trace ======== | |
1015 | * Purpose: | |
1016 | * Retrieve the current contents of the trace buffer, located on the | |
1017 | * Processor. Predefined symbols for the trace buffer must have been | |
1018 | * configured into the DSP executable. | |
1019 | * Details: | |
1020 | * We support using the symbols SYS_PUTCBEG and SYS_PUTCEND to define a | |
1021 | * trace buffer, only. Treat it as an undocumented feature. | |
1022 | * This call is destructive, meaning the processor is placed in the monitor | |
1023 | * state as a result of this function. | |
1024 | */ | |
1025 | int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size) | |
1026 | { | |
1027 | int status; | |
1028 | status = -ENOSYS; | |
1029 | return status; | |
1030 | } | |
1031 | ||
1032 | /* | |
1033 | * ======== proc_init ======== | |
1034 | * Purpose: | |
1035 | * Initialize PROC's private state, keeping a reference count on each call | |
1036 | */ | |
1037 | bool proc_init(void) | |
1038 | { | |
1039 | bool ret = true; | |
1040 | ||
1041 | DBC_REQUIRE(refs >= 0); | |
1042 | ||
1043 | if (ret) | |
1044 | refs++; | |
1045 | ||
1046 | DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0))); | |
1047 | ||
1048 | return ret; | |
1049 | } | |
1050 | ||
/*
 *  ======== proc_load ========
 *  Purpose:
 *      Reset a processor and load a new base program image.
 *      This will be an OEM-only function, and not part of the DSP/BIOS Bridge
 *      application developer's API.
 *  Details:
 *      Stops the board and places it in monitor state, prepends
 *      "PROC_ID=<n>" to the target environment, unregisters nodes of any
 *      previously loaded COFF, auto-registers the new COFF, loads the image
 *      through the COD manager (optionally boosting the OPP during the load),
 *      then initializes DMM from the image's EXTEND symbol.
 */
int proc_load(void *hprocessor, const s32 argc_index,
	      const char **user_args, const char **user_envp)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct io_mgr *hio_mgr;	/* IO manager handle */
	struct msg_mgr *hmsg_mgr;
	struct cod_manager *cod_mgr;	/* Code manager handle */
	char *pargv0;		/* temp argv[0] ptr */
	char **new_envp;	/* Updated envp[] array. */
	char sz_proc_id[MAXPROCIDLEN];	/* Size of "PROC_ID=<n>" */
	s32 envp_elems;		/* Num elements in envp[]. */
	s32 cnew_envp;		/* "  " in new_envp[] */
	s32 nproc_id = 0;	/* Anticipate MP version. */
	struct dcd_manager *hdcd_handle;
	struct dmm_object *dmm_mgr;
	u32 dw_ext_end;
	u32 proc_id;
	int brd_state;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	struct timeval tv1;
	struct timeval tv2;
#endif

#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
#endif

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(argc_index > 0);
	DBC_REQUIRE(user_args != NULL);

#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	do_gettimeofday(&tv1);
#endif
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
	if (!cod_mgr) {
		status = -EPERM;
		goto func_end;
	}
	/* Stop any running image; refuses if nodes are still allocated. */
	status = proc_stop(hprocessor);
	if (DSP_FAILED(status))
		goto func_end;

	/* Place the board in the monitor state. */
	status = proc_monitor(hprocessor);
	if (DSP_FAILED(status))
		goto func_end;

	/* Save ptr to original argv[0]. */
	pargv0 = (char *)user_args[0];
	/*Prepend "PROC_ID=<nproc_id>"to envp array for target. */
	envp_elems = get_envp_count((char **)user_envp);
	cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2));
	new_envp = kzalloc(cnew_envp * sizeof(char **), GFP_KERNEL);
	if (new_envp) {
		status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID,
				  nproc_id);
		/* NOTE(review): the kernel's snprintf() never returns -1; on
		 * truncation it returns the would-be length (>= MAXPROCIDLEN),
		 * so this overflow check appears ineffective - confirm. */
		if (status == -1) {
			dev_dbg(bridge, "%s: Proc ID string overflow\n",
				__func__);
			status = -EPERM;
		} else {
			new_envp =
			    prepend_envp(new_envp, (char **)user_envp,
					 envp_elems, cnew_envp, sz_proc_id);
			/* Get the DCD Handle */
			status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
						    (u32 *) &hdcd_handle);
			if (DSP_SUCCEEDED(status)) {
				/* Before proceeding with new load,
				 * check if a previously registered COFF
				 * exists.
				 * If yes, unregister nodes in previously
				 * registered COFF.  If any error occurred,
				 * set previously registered COFF to NULL. */
				if (p_proc_object->psz_last_coff != NULL) {
					status =
					    dcd_auto_unregister(hdcd_handle,
								p_proc_object->
								psz_last_coff);
					/* Regardless of auto unregister status,
					 * free previously allocated
					 * memory. */
					kfree(p_proc_object->psz_last_coff);
					p_proc_object->psz_last_coff = NULL;
				}
			}
			/* On success, do cod_open_base() */
			status = cod_open_base(cod_mgr, (char *)user_args[0],
					       COD_SYMB);
		}
	} else {
		status = -ENOMEM;
	}
	if (DSP_SUCCEEDED(status)) {
		/* Auto-register data base */
		/* Get the DCD Handle */
		status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
					    (u32 *) &hdcd_handle);
		if (DSP_SUCCEEDED(status)) {
			/* Auto register nodes in specified COFF
			 * file.  If registration did not fail,
			 * (status = 0 or -EACCES)
			 * save the name of the COFF file for
			 * de-registration in the future. */
			status =
			    dcd_auto_register(hdcd_handle,
					      (char *)user_args[0]);
			if (status == -EACCES)
				status = 0;

			if (DSP_FAILED(status)) {
				status = -EPERM;
			} else {
				DBC_ASSERT(p_proc_object->psz_last_coff ==
					   NULL);
				/* Allocate memory for pszLastCoff */
				p_proc_object->psz_last_coff =
				    kzalloc((strlen(user_args[0]) +
					     1), GFP_KERNEL);
				/* If memory allocated, save COFF file name */
				if (p_proc_object->psz_last_coff) {
					strncpy(p_proc_object->psz_last_coff,
						(char *)user_args[0],
						(strlen((char *)user_args[0]) +
						 1));
				}
			}
		}
	}
	/* Update shared memory address and size */
	if (DSP_SUCCEEDED(status)) {
		/* Create the message manager. This must be done
		 * before calling the IOOnLoaded function. */
		dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
		if (!hmsg_mgr) {
			status = msg_create(&hmsg_mgr, p_proc_object->hdev_obj,
					    (msg_onexit) node_on_exit);
			DBC_ASSERT(DSP_SUCCEEDED(status));
			dev_set_msg_mgr(p_proc_object->hdev_obj, hmsg_mgr);
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Set the Device object's message manager */
		status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
		if (hio_mgr)
			status = (*p_proc_object->intf_fxns->pfn_io_on_loaded)
			    (hio_mgr);
		else
			status = -EFAULT;
	}
	if (DSP_SUCCEEDED(status)) {
		/* Now, attempt to load an exec: */

		/* Boost the OPP level to Maximum level supported by baseport */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]);
#endif
		status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
				       dev_brd_write_fxn,
				       p_proc_object->hdev_obj, NULL);
		if (DSP_FAILED(status)) {
			if (status == -EBADF) {
				dev_dbg(bridge, "%s: Failure to Load the EXE\n",
					__func__);
			}
			if (status == -ESPIPE) {
				pr_err("%s: Couldn't parse the file\n",
				       __func__);
			}
		}
		/* Requesting the lowest opp supported */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif

	}
	if (DSP_SUCCEEDED(status)) {
		/* Update the Processor status to loaded */
		status = (*p_proc_object->intf_fxns->pfn_brd_set_state)
		    (p_proc_object->hbridge_context, BRD_LOADED);
		if (DSP_SUCCEEDED(status)) {
			p_proc_object->proc_state = PROC_LOADED;
			if (p_proc_object->ntfy_obj)
				proc_notify_clients(p_proc_object,
						    DSP_PROCESSORSTATECHANGE);
		}
	}
	if (DSP_SUCCEEDED(status)) {
		status = proc_get_processor_id(hprocessor, &proc_id);
		if (proc_id == DSP_UNIT) {
			/* Use all available DSP address space after EXTMEM
			 * for DMM */
			if (DSP_SUCCEEDED(status))
				status = cod_get_sym_value(cod_mgr, EXTEND,
							   &dw_ext_end);

			/* Reset DMM structs and add an initial free chunk */
			if (DSP_SUCCEEDED(status)) {
				status =
				    dev_get_dmm_mgr(p_proc_object->hdev_obj,
						    &dmm_mgr);
				if (dmm_mgr) {
					/* Set dw_ext_end to DMM START u8
					 * address */
					dw_ext_end =
					    (dw_ext_end + 1) * DSPWORDSIZE;
					/* DMM memory is from EXT_END */
					status = dmm_create_tables(dmm_mgr,
								   dw_ext_end,
								   DMMPOOLSIZE);
				} else {
					status = -EFAULT;
				}
			}
		}
	}
	/* Restore the original argv[0] */
	kfree(new_envp);
	user_args[0] = pargv0;
	if (DSP_SUCCEEDED(status)) {
		if (DSP_SUCCEEDED((*p_proc_object->intf_fxns->pfn_brd_status)
				  (p_proc_object->hbridge_context, &brd_state))) {
			pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
			/* Remember the image path (e.g. for later reload). */
			kfree(drv_datap->base_img);
			drv_datap->base_img = kmalloc(strlen(pargv0) + 1,
						      GFP_KERNEL);
			if (drv_datap->base_img)
				strncpy(drv_datap->base_img, pargv0,
					strlen(pargv0) + 1);
			else
				status = -ENOMEM;
			DBC_ASSERT(brd_state == BRD_LOADED);
		}
	}

func_end:
	if (DSP_FAILED(status))
		pr_err("%s: Processor failed to load\n", __func__);

	DBC_ENSURE((DSP_SUCCEEDED(status)
		    && p_proc_object->proc_state == PROC_LOADED)
		   || DSP_FAILED(status));
#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	do_gettimeofday(&tv2);
	if (tv2.tv_usec < tv1.tv_usec) {
		tv2.tv_usec += 1000000;
		tv2.tv_sec--;
	}
	dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__,
		tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec);
#endif
	return status;
}
1322 | ||
1323 | /* | |
1324 | * ======== proc_map ======== | |
1325 | * Purpose: | |
1326 | * Maps a MPU buffer to DSP address space. | |
1327 | */ | |
1328 | int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size, | |
1329 | void *req_addr, void **pp_map_addr, u32 ul_map_attr, | |
1330 | struct process_context *pr_ctxt) | |
1331 | { | |
1332 | u32 va_align; | |
1333 | u32 pa_align; | |
1334 | struct dmm_object *dmm_mgr; | |
1335 | u32 size_align; | |
1336 | int status = 0; | |
1337 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | |
1338 | struct dmm_map_object *map_obj; | |
1339 | u32 tmp_addr = 0; | |
1340 | ||
b3d23688 | 1341 | #ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK |
7d55524d ORL |
1342 | if ((ul_map_attr & BUFMODE_MASK) != RBUF) { |
1343 | if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) || | |
1344 | !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) { | |
1345 | pr_err("%s: not aligned: 0x%x (%d)\n", __func__, | |
1346 | (u32)pmpu_addr, ul_size); | |
1347 | return -EFAULT; | |
1348 | } | |
1349 | } | |
1350 | #endif | |
1351 | ||
1352 | /* Calculate the page-aligned PA, VA and size */ | |
1353 | va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K); | |
1354 | pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K); | |
1355 | size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align, | |
1356 | PG_SIZE4K); | |
1357 | ||
1358 | if (!p_proc_object) { | |
1359 | status = -EFAULT; | |
1360 | goto func_end; | |
1361 | } | |
1362 | /* Critical section */ | |
1363 | mutex_lock(&proc_lock); | |
1364 | dmm_get_handle(p_proc_object, &dmm_mgr); | |
1365 | if (dmm_mgr) | |
1366 | status = dmm_map_memory(dmm_mgr, va_align, size_align); | |
1367 | else | |
1368 | status = -EFAULT; | |
1369 | ||
1370 | /* Add mapping to the page tables. */ | |
1371 | if (DSP_SUCCEEDED(status)) { | |
1372 | ||
1373 | /* Mapped address = MSB of VA | LSB of PA */ | |
1374 | tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1))); | |
1375 | /* mapped memory resource tracking */ | |
1376 | map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr, | |
1377 | size_align); | |
1378 | if (!map_obj) | |
1379 | status = -ENOMEM; | |
1380 | else | |
1381 | status = (*p_proc_object->intf_fxns->pfn_brd_mem_map) | |
1382 | (p_proc_object->hbridge_context, pa_align, va_align, | |
1383 | size_align, ul_map_attr, map_obj->pages); | |
1384 | } | |
1385 | if (DSP_SUCCEEDED(status)) { | |
1386 | /* Mapped address = MSB of VA | LSB of PA */ | |
1387 | *pp_map_addr = (void *) tmp_addr; | |
1388 | } else { | |
1389 | remove_mapping_information(pr_ctxt, tmp_addr, size_align); | |
1390 | dmm_un_map_memory(dmm_mgr, va_align, &size_align); | |
1391 | } | |
1392 | mutex_unlock(&proc_lock); | |
1393 | ||
1394 | if (DSP_FAILED(status)) | |
1395 | goto func_end; | |
1396 | ||
1397 | func_end: | |
1398 | dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, " | |
1399 | "req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, " | |
1400 | "pa_align %x, size_align %x status 0x%x\n", __func__, | |
1401 | hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr, | |
1402 | pp_map_addr, va_align, pa_align, size_align, status); | |
1403 | ||
1404 | return status; | |
1405 | } | |
1406 | ||
/*
 *  ======== proc_register_notify ========
 *  Purpose:
 *      Register to be notified of specific processor events.
 *      Plain processor events go to the PROC notify object; error events
 *      (SYSERROR/MMUFAULT/PWRERROR/WDTOVERFLOW) go to the DEH manager.
 */
int proc_register_notify(void *hprocessor, u32 event_mask,
			 u32 notify_type, struct dsp_notification
			 * hnotification)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct deh_mgr *hdeh_mgr;

	DBC_REQUIRE(hnotification != NULL);
	DBC_REQUIRE(refs > 0);

	/* Check processor handle */
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Check if event mask is a valid processor related event */
	if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH |
			   DSP_PROCESSORDETACH | DSP_PROCESSORRESTART |
			   DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR |
			   DSP_WDTOVERFLOW))
		status = -EINVAL;

	/* Check if notify type is valid */
	if (notify_type != DSP_SIGNALEVENT)
		status = -EINVAL;

	if (DSP_SUCCEEDED(status)) {
		/* If event mask is not DSP_SYSERROR, DSP_MMUFAULT,
		 * or DSP_PWRERROR then register event immediately. */
		if (event_mask &
		    ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR |
		      DSP_WDTOVERFLOW)) {
			status = ntfy_register(p_proc_object->ntfy_obj,
					       hnotification, event_mask,
					       notify_type);
			/* Special case alert, special case alert!
			 * If we're trying to *deregister* (i.e. event_mask
			 * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification,
			 * we have to deregister with the DEH manager.
			 * There's no way to know, based on event_mask which
			 * manager the notification event was registered with,
			 * so if we're trying to deregister and ntfy_register
			 * failed, we'll give the deh manager a shot.
			 */
			if ((event_mask == 0) && DSP_FAILED(status)) {
				/* NOTE(review): dev_get_deh_mgr()'s status is
				 * immediately overwritten and hdeh_mgr is not
				 * NULL-checked - confirm it cannot fail here. */
				status =
				    dev_get_deh_mgr(p_proc_object->hdev_obj,
						    &hdeh_mgr);
				status =
				    bridge_deh_register_notify(hdeh_mgr,
							       event_mask,
							       notify_type,
							       hnotification);
			}
		} else {
			/* Error-class events are handled by the DEH manager. */
			status = dev_get_deh_mgr(p_proc_object->hdev_obj,
						 &hdeh_mgr);
			status =
			    bridge_deh_register_notify(hdeh_mgr,
						       event_mask,
						       notify_type,
						       hnotification);

		}
	}
func_end:
	return status;
}
1481 | ||
1482 | /* | |
1483 | * ======== proc_reserve_memory ======== | |
1484 | * Purpose: | |
1485 | * Reserve a virtually contiguous region of DSP address space. | |
1486 | */ | |
1487 | int proc_reserve_memory(void *hprocessor, u32 ul_size, | |
1488 | void **pp_rsv_addr, | |
1489 | struct process_context *pr_ctxt) | |
1490 | { | |
1491 | struct dmm_object *dmm_mgr; | |
1492 | int status = 0; | |
1493 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | |
1494 | struct dmm_rsv_object *rsv_obj; | |
1495 | ||
1496 | if (!p_proc_object) { | |
1497 | status = -EFAULT; | |
1498 | goto func_end; | |
1499 | } | |
1500 | ||
1501 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | |
1502 | if (!dmm_mgr) { | |
1503 | status = -EFAULT; | |
1504 | goto func_end; | |
1505 | } | |
1506 | ||
1507 | status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr); | |
1508 | if (status != 0) | |
1509 | goto func_end; | |
1510 | ||
1511 | /* | |
1512 | * A successful reserve should be followed by insertion of rsv_obj | |
1513 | * into dmm_rsv_list, so that reserved memory resource tracking | |
1514 | * remains uptodate | |
1515 | */ | |
1516 | rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL); | |
1517 | if (rsv_obj) { | |
1518 | rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr; | |
1519 | spin_lock(&pr_ctxt->dmm_rsv_lock); | |
1520 | list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list); | |
1521 | spin_unlock(&pr_ctxt->dmm_rsv_lock); | |
1522 | } | |
1523 | ||
1524 | func_end: | |
1525 | dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p " | |
1526 | "status 0x%x\n", __func__, hprocessor, | |
1527 | ul_size, pp_rsv_addr, status); | |
1528 | return status; | |
1529 | } | |
1530 | ||
/*
 *  ======== proc_start ========
 *  Purpose:
 *      Start a processor running.  The processor must be in the PROC_LOADED
 *      state; on success it transitions to PROC_RUNNING, and on a failed
 *      dev_create2() it is stopped again (PROC_STOPPED).
 */
int proc_start(void *hprocessor)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct cod_manager *cod_mgr;	/* Code manager handle */
	u32 dw_dsp_addr;	/* Loaded code's entry point. */
	int brd_state;

	DBC_REQUIRE(refs > 0);
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Call the bridge_brd_start */
	if (p_proc_object->proc_state != PROC_LOADED) {
		status = -EBADR;
		goto func_end;
	}
	status = dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
	if (!cod_mgr) {
		status = -EFAULT;
		goto func_cont;
	}

	/* Look up the image's entry point before starting the board. */
	status = cod_get_entry(cod_mgr, &dw_dsp_addr);
	if (DSP_FAILED(status))
		goto func_cont;

	status = (*p_proc_object->intf_fxns->pfn_brd_start)
	    (p_proc_object->hbridge_context, dw_dsp_addr);
	if (DSP_FAILED(status))
		goto func_cont;

	/* Call dev_create2 */
	status = dev_create2(p_proc_object->hdev_obj);
	if (DSP_SUCCEEDED(status)) {
		p_proc_object->proc_state = PROC_RUNNING;
		/* Deep sleep switces off the peripheral clocks.
		 * we just put the DSP CPU in idle in the idle loop.
		 * so there is no need to send a command to DSP */

		if (p_proc_object->ntfy_obj) {
			proc_notify_clients(p_proc_object,
					    DSP_PROCESSORSTATECHANGE);
		}
	} else {
		/* Failed to Create Node Manager and DISP Object
		 * Stop the Processor from running. Put it in STOPPED State */
		(void)(*p_proc_object->intf_fxns->
		       pfn_brd_stop) (p_proc_object->hbridge_context);
		p_proc_object->proc_state = PROC_STOPPED;
	}
func_cont:
	if (DSP_SUCCEEDED(status)) {
		if (DSP_SUCCEEDED((*p_proc_object->intf_fxns->pfn_brd_status)
				  (p_proc_object->hbridge_context, &brd_state))) {
			pr_info("%s: dsp in running state\n", __func__);
			DBC_ASSERT(brd_state != BRD_HIBERNATION);
		}
	} else {
		pr_err("%s: Failed to start the dsp\n", __func__);
	}

func_end:
	DBC_ENSURE((DSP_SUCCEEDED(status) && p_proc_object->proc_state ==
		    PROC_RUNNING) || DSP_FAILED(status));
	return status;
}
1604 | ||
/*
 *  ======== proc_stop ========
 *  Purpose:
 *      Stop a processor running.  Refuses (-EBADR) while nodes are still
 *      allocated; otherwise stops the board and tears down the node and
 *      msg_ctrl managers.
 */
int proc_stop(void *hprocessor)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct msg_mgr *hmsg_mgr;
	struct node_mgr *hnode_mgr;
	void *hnode;
	u32 node_tab_size = 1;
	u32 num_nodes = 0;
	u32 nodes_allocated = 0;
	int brd_state;

	DBC_REQUIRE(refs > 0);
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* check if there are any running nodes */
	status = dev_get_node_manager(p_proc_object->hdev_obj, &hnode_mgr);
	if (DSP_SUCCEEDED(status) && hnode_mgr) {
		status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
					 &num_nodes, &nodes_allocated);
		if ((status == -EINVAL) || (nodes_allocated > 0)) {
			/* -EINVAL from node_enum_nodes means the 1-entry
			 * table was too small, i.e. nodes exist. */
			pr_err("%s: Can't stop device, active nodes = %d \n",
			       __func__, nodes_allocated);
			return -EBADR;
		}
	}
	/* Call the bridge_brd_stop */
	/* It is OK to stop a device that does n't have nodes OR not started */
	status =
	    (*p_proc_object->intf_fxns->
	     pfn_brd_stop) (p_proc_object->hbridge_context);
	if (DSP_SUCCEEDED(status)) {
		dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
		p_proc_object->proc_state = PROC_STOPPED;
		/* Destory the Node Manager, msg_ctrl Manager */
		if (DSP_SUCCEEDED(dev_destroy2(p_proc_object->hdev_obj))) {
			/* Destroy the msg_ctrl by calling msg_delete */
			dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
			if (hmsg_mgr) {
				msg_delete(hmsg_mgr);
				dev_set_msg_mgr(p_proc_object->hdev_obj, NULL);
			}
			if (DSP_SUCCEEDED
			    ((*p_proc_object->
			      intf_fxns->pfn_brd_status) (p_proc_object->
							  hbridge_context,
							  &brd_state)))
				DBC_ASSERT(brd_state == BRD_STOPPED);
		}
	} else {
		pr_err("%s: Failed to stop the processor\n", __func__);
	}
func_end:

	return status;
}
1668 | ||
/*
 *  ======== proc_un_map ========
 *  Purpose:
 *      Removes a MPU buffer mapping from the DSP address space.
 *      The DSP-side address is page-aligned down before the DMM lookup;
 *      on success the mapping is also dropped from the process context's
 *      resource-tracking list.
 */
int proc_un_map(void *hprocessor, void *map_addr,
		struct process_context *pr_ctxt)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct dmm_object *dmm_mgr;
	u32 va_align;
	u32 size_align;

	va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}

	/* NOTE(review): sibling functions pass p_proc_object here; this one
	 * passes the raw handle - presumably dmm_get_handle() ignores the
	 * first argument, confirm. */
	status = dmm_get_handle(hprocessor, &dmm_mgr);
	if (!dmm_mgr) {
		status = -EFAULT;
		goto func_end;
	}

	/* Critical section */
	mutex_lock(&proc_lock);
	/*
	 * Update DMM structures. Get the size to unmap.
	 * This function returns error if the VA is not mapped
	 */
	status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
	/* Remove mapping from the page tables. */
	if (DSP_SUCCEEDED(status)) {
		status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
		    (p_proc_object->hbridge_context, va_align, size_align);
	}

	mutex_unlock(&proc_lock);
	if (DSP_FAILED(status))
		goto func_end;

	/*
	 * A successful unmap should be followed by removal of map_obj
	 * from dmm_map_list, so that mapped memory resource tracking
	 * remains uptodate
	 */
	remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);

func_end:
	dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
		__func__, hprocessor, map_addr, status);
	return status;
}
1724 | ||
/*
 *  ======== proc_un_reserve_memory ========
 *  Purpose:
 *      Frees a previously reserved region of DSP address space and removes
 *      the matching entry from the process context's reservation-tracking
 *      list.
 */
int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
			   struct process_context *pr_ctxt)
{
	struct dmm_object *dmm_mgr;
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct dmm_rsv_object *rsv_obj;

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}

	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = -EFAULT;
		goto func_end;
	}

	status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
	if (status != 0)
		goto func_end;

	/*
	 * A successful unreserve should be followed by removal of rsv_obj
	 * from dmm_rsv_list, so that reserved memory resource tracking
	 * remains uptodate
	 */
	spin_lock(&pr_ctxt->dmm_rsv_lock);
	list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
		if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
			/* Found the tracking node for this address. */
			list_del(&rsv_obj->link);
			kfree(rsv_obj);
			break;
		}
	}
	spin_unlock(&pr_ctxt->dmm_rsv_lock);

func_end:
	dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
		__func__, hprocessor, prsv_addr, status);
	return status;
}
1773 | ||
/*
 *  ======== = proc_monitor ======== ==
 *  Purpose:
 *      Place the Processor in Monitor State. This is an internal
 *      function and a requirement before Processor is loaded.
 *      This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor.
 *      In dev_destroy2 we delete the node manager.
 *  Parameters:
 *      p_proc_object:  Pointer to Processor Object
 *  Returns:
 *      0:	Processor placed in monitor mode.
 *      !0:	Failed to place processor in monitor mode.
 *  Requires:
 *      Valid Processor Handle
 *  Ensures:
 *      Success:	ProcObject state is PROC_IDLE
 */
static int proc_monitor(struct proc_object *proc_obj)
{
	int status = -EPERM;
	struct msg_mgr *hmsg_mgr;
	int brd_state;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(proc_obj);

	/* This is needed only when Device is loaded when it is
	 * already 'ACTIVE' */
	/* Destory the Node Manager, msg_ctrl Manager */
	if (DSP_SUCCEEDED(dev_destroy2(proc_obj->hdev_obj))) {
		/* Destroy the msg_ctrl by calling msg_delete */
		dev_get_msg_mgr(proc_obj->hdev_obj, &hmsg_mgr);
		if (hmsg_mgr) {
			msg_delete(hmsg_mgr);
			dev_set_msg_mgr(proc_obj->hdev_obj, NULL);
		}
	}
	/* Place the Board in the Monitor State */
	if (DSP_SUCCEEDED((*proc_obj->intf_fxns->pfn_brd_monitor)
			  (proc_obj->hbridge_context))) {
		status = 0;
		if (DSP_SUCCEEDED((*proc_obj->intf_fxns->pfn_brd_status)
				  (proc_obj->hbridge_context, &brd_state)))
			DBC_ASSERT(brd_state == BRD_IDLE);
	}

	/* NOTE(review): if the pfn_brd_status call above failed, brd_state is
	 * still uninitialized when this contract check reads it - confirm the
	 * DBC configuration tolerates that. */
	DBC_ENSURE((DSP_SUCCEEDED(status) && brd_state == BRD_IDLE) ||
		   DSP_FAILED(status));
	return status;
}
1824 | ||
/*
 *  ======== get_envp_count ========
 *  Purpose:
 *      Count the elements in the envp array, including the terminating
 *      NULL element. A NULL array counts as zero elements.
 */
static s32 get_envp_count(char **envp)
{
	s32 count = 0;

	if (!envp)
		return 0;

	while (envp[count])
		count++;

	/* +1 accounts for the terminating NULL entry itself. */
	return count + 1;
}
1843 | ||
1844 | /* | |
1845 | * ======== prepend_envp ======== | |
1846 | * Purpose: | |
1847 | * Prepend an environment variable=value pair to the new envp array, and | |
1848 | * copy in the existing var=value pairs in the old envp array. | |
1849 | */ | |
1850 | static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems, | |
0cd343a4 | 1851 | s32 cnew_envp, char *sz_var) |
7d55524d ORL |
1852 | { |
1853 | char **pp_envp = new_envp; | |
1854 | ||
1855 | DBC_REQUIRE(new_envp); | |
1856 | ||
1857 | /* Prepend new environ var=value string */ | |
0cd343a4 | 1858 | *new_envp++ = sz_var; |
7d55524d ORL |
1859 | |
1860 | /* Copy user's environment into our own. */ | |
1861 | while (envp_elems--) | |
1862 | *new_envp++ = *envp++; | |
1863 | ||
1864 | /* Ensure NULL terminates the new environment strings array. */ | |
1865 | if (envp_elems == 0) | |
1866 | *new_envp = NULL; | |
1867 | ||
1868 | return pp_envp; | |
1869 | } | |
1870 | ||
1871 | /* | |
1872 | * ======== proc_notify_clients ======== | |
1873 | * Purpose: | |
1874 | * Notify the processor the events. | |
1875 | */ | |
0cd343a4 | 1876 | int proc_notify_clients(void *proc, u32 events) |
7d55524d ORL |
1877 | { |
1878 | int status = 0; | |
e6890692 | 1879 | struct proc_object *p_proc_object = (struct proc_object *)proc; |
7d55524d ORL |
1880 | |
1881 | DBC_REQUIRE(p_proc_object); | |
bf968b0a | 1882 | DBC_REQUIRE(is_valid_proc_event(events)); |
7d55524d ORL |
1883 | DBC_REQUIRE(refs > 0); |
1884 | if (!p_proc_object) { | |
1885 | status = -EFAULT; | |
1886 | goto func_end; | |
1887 | } | |
1888 | ||
0cd343a4 | 1889 | ntfy_notify(p_proc_object->ntfy_obj, events); |
7d55524d ORL |
1890 | func_end: |
1891 | return status; | |
1892 | } | |
1893 | ||
1894 | /* | |
1895 | * ======== proc_notify_all_clients ======== | |
1896 | * Purpose: | |
1897 | * Notify the processor the events. This includes notifying all clients | |
1898 | * attached to a particulat DSP. | |
1899 | */ | |
0cd343a4 | 1900 | int proc_notify_all_clients(void *proc, u32 events) |
7d55524d ORL |
1901 | { |
1902 | int status = 0; | |
e6890692 | 1903 | struct proc_object *p_proc_object = (struct proc_object *)proc; |
7d55524d | 1904 | |
bf968b0a | 1905 | DBC_REQUIRE(is_valid_proc_event(events)); |
7d55524d ORL |
1906 | DBC_REQUIRE(refs > 0); |
1907 | ||
1908 | if (!p_proc_object) { | |
1909 | status = -EFAULT; | |
1910 | goto func_end; | |
1911 | } | |
1912 | ||
0cd343a4 | 1913 | dev_notify_clients(p_proc_object->hdev_obj, events); |
7d55524d ORL |
1914 | |
1915 | func_end: | |
1916 | return status; | |
1917 | } | |
1918 | ||
1919 | /* | |
1920 | * ======== proc_get_processor_id ======== | |
1921 | * Purpose: | |
1922 | * Retrieves the processor ID. | |
1923 | */ | |
13b18c29 | 1924 | int proc_get_processor_id(void *proc, u32 * proc_id) |
7d55524d ORL |
1925 | { |
1926 | int status = 0; | |
e6890692 | 1927 | struct proc_object *p_proc_object = (struct proc_object *)proc; |
7d55524d ORL |
1928 | |
1929 | if (p_proc_object) | |
13b18c29 | 1930 | *proc_id = p_proc_object->processor_id; |
7d55524d ORL |
1931 | else |
1932 | status = -EFAULT; | |
1933 | ||
1934 | return status; | |
1935 | } |