1/*
2 * proc.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Processor interface at the driver level.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
2094f12d 19#include <linux/types.h>
20/* ------------------------------------ Host OS */
21#include <linux/dma-mapping.h>
22#include <linux/scatterlist.h>
23#include <dspbridge/host_os.h>
24
25/* ----------------------------------- DSP/BIOS Bridge */
26#include <dspbridge/dbdefs.h>
27
7d55524d 28/* ----------------------------------- OS Adaptation Layer */
29#include <dspbridge/ntfy.h>
30#include <dspbridge/sync.h>
31/* ----------------------------------- Bridge Driver */
32#include <dspbridge/dspdefs.h>
33#include <dspbridge/dspdeh.h>
34/* ----------------------------------- Platform Manager */
35#include <dspbridge/cod.h>
36#include <dspbridge/dev.h>
37#include <dspbridge/procpriv.h>
677f2ded 38#include <dspbridge/dmm.h>
39
40/* ----------------------------------- Resource Manager */
41#include <dspbridge/mgr.h>
42#include <dspbridge/node.h>
43#include <dspbridge/nldr.h>
44#include <dspbridge/rmm.h>
45
46/* ----------------------------------- Others */
47#include <dspbridge/dbdcd.h>
48#include <dspbridge/msg.h>
49#include <dspbridge/dspioctl.h>
50#include <dspbridge/drv.h>
51
52/* ----------------------------------- This */
53#include <dspbridge/proc.h>
54#include <dspbridge/pwr.h>
55
56#include <dspbridge/resourcecleanup.h>
57/* ----------------------------------- Defines, Data Structures, Typedefs */
58#define MAXCMDLINELEN 255
59#define PROC_ENVPROCID "PROC_ID=%d"
60#define MAXPROCIDLEN (8 + 5)
61#define PROC_DFLT_TIMEOUT 10000 /* Time out in milliseconds */
62#define PWR_TIMEOUT 500 /* Sleep/wake timeout in msec */
63#define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */
64
65#define DSP_CACHE_LINE 128
66
67#define BUFMODE_MASK (3 << 14)
68
69/* Buffer modes from DSP perspective */
70#define RBUF 0x4000 /* Input buffer */
71#define WBUF 0x8000 /* Output Buffer */
72
73extern struct device *bridge;
74
75/* ----------------------------------- Globals */
76
77/* The proc_object structure. */
78struct proc_object {
79 struct list_head link; /* Link to next proc_object */
085467b8 80 struct dev_object *dev_obj; /* Device this PROC represents */
7d55524d 81 u32 process; /* Process owning this Processor */
085467b8 82 struct mgr_object *mgr_obj; /* Manager Object Handle */
83 u32 attach_count; /* Processor attach count */
84 u32 processor_id; /* Processor number */
a534f17b 85 u32 timeout; /* Time out count */
7d55524d 86 enum dsp_procstate proc_state; /* Processor state */
085467b8 87 u32 unit; /* DDSP unit number */
88 bool is_already_attached; /*
89 * True if the Device below has
90 * GPP Client attached
91 */
92 struct ntfy_object *ntfy_obj; /* Manages notifications */
93 /* Bridge Context Handle */
085467b8 94 struct bridge_dev_context *bridge_context;
95 /* Function interface to Bridge driver */
96 struct bridge_drv_interface *intf_fxns;
085467b8 97 char *last_coff;
98 struct list_head proc_list;
99};
100
101DEFINE_MUTEX(proc_lock); /* For critical sections */
102
103/* ----------------------------------- Function Prototypes */
c8c1ad8c 104static int proc_monitor(struct proc_object *proc_obj);
105static s32 get_envp_count(char **envp);
106static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
0cd343a4 107 s32 cnew_envp, char *sz_var);
108
109/* remember mapping information */
110static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
111 u32 mpu_addr, u32 dsp_addr, u32 size)
112{
113 struct dmm_map_object *map_obj;
114
115 u32 num_usr_pgs = size / PG_SIZE4K;
116
117 pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n",
118 __func__, mpu_addr,
119 dsp_addr, size);
120
121 map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL);
122 if (!map_obj) {
123 pr_err("%s: kzalloc failed\n", __func__);
124 return NULL;
125 }
126 INIT_LIST_HEAD(&map_obj->link);
127
128 map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *),
129 GFP_KERNEL);
130 if (!map_obj->pages) {
131 pr_err("%s: kzalloc failed\n", __func__);
132 kfree(map_obj);
133 return NULL;
134 }
135
136 map_obj->mpu_addr = mpu_addr;
137 map_obj->dsp_addr = dsp_addr;
138 map_obj->size = size;
139 map_obj->num_usr_pgs = num_usr_pgs;
140
141 spin_lock(&pr_ctxt->dmm_map_lock);
142 list_add(&map_obj->link, &pr_ctxt->dmm_map_list);
143 spin_unlock(&pr_ctxt->dmm_map_lock);
144
145 return map_obj;
146}
147
148static int match_exact_map_obj(struct dmm_map_object *map_obj,
149 u32 dsp_addr, u32 size)
150{
151 if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
152 pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n",
153 __func__, dsp_addr, map_obj->size, size);
154
155 return map_obj->dsp_addr == dsp_addr &&
156 map_obj->size == size;
157}
158
7d55524d 159static void remove_mapping_information(struct process_context *pr_ctxt,
2fa28a51 160 u32 dsp_addr, u32 size)
161{
162 struct dmm_map_object *map_obj;
163
164 pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
165 dsp_addr, size);
166
167 spin_lock(&pr_ctxt->dmm_map_lock);
168 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
2fa28a51 169 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
170 __func__,
171 map_obj->mpu_addr,
172 map_obj->dsp_addr,
173 map_obj->size);
7d55524d 174
2fa28a51 175 if (match_exact_map_obj(map_obj, dsp_addr, size)) {
176 pr_debug("%s: match, deleting map info\n", __func__);
177 list_del(&map_obj->link);
178 kfree(map_obj->dma_info.sg);
179 kfree(map_obj->pages);
180 kfree(map_obj);
181 goto out;
182 }
183 pr_debug("%s: candidate didn't match\n", __func__);
184 }
185
186 pr_err("%s: failed to find given map info\n", __func__);
187out:
188 spin_unlock(&pr_ctxt->dmm_map_lock);
189}
190
191static int match_containing_map_obj(struct dmm_map_object *map_obj,
192 u32 mpu_addr, u32 size)
193{
194 u32 map_obj_end = map_obj->mpu_addr + map_obj->size;
195
196 return mpu_addr >= map_obj->mpu_addr &&
197 mpu_addr + size <= map_obj_end;
198}
199
200static struct dmm_map_object *find_containing_mapping(
201 struct process_context *pr_ctxt,
202 u32 mpu_addr, u32 size)
203{
204 struct dmm_map_object *map_obj;
205 pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__,
206 mpu_addr, size);
207
208 spin_lock(&pr_ctxt->dmm_map_lock);
209 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
210 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
211 __func__,
212 map_obj->mpu_addr,
213 map_obj->dsp_addr,
214 map_obj->size);
215 if (match_containing_map_obj(map_obj, mpu_addr, size)) {
216 pr_debug("%s: match!\n", __func__);
217 goto out;
218 }
219
220 pr_debug("%s: no match!\n", __func__);
221 }
222
223 map_obj = NULL;
224out:
225 spin_unlock(&pr_ctxt->dmm_map_lock);
226 return map_obj;
227}
228
229static int find_first_page_in_cache(struct dmm_map_object *map_obj,
230 unsigned long mpu_addr)
231{
232 u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT;
233 u32 requested_base_page = mpu_addr >> PAGE_SHIFT;
234 int pg_index = requested_base_page - mapped_base_page;
235
236 if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) {
237 pr_err("%s: failed (got %d)\n", __func__, pg_index);
238 return -1;
239 }
240
241 pr_debug("%s: first page is %d\n", __func__, pg_index);
242 return pg_index;
243}
244
245static inline struct page *get_mapping_page(struct dmm_map_object *map_obj,
246 int pg_i)
247{
248 pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__,
249 pg_i, map_obj->num_usr_pgs);
250
251 if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) {
252 pr_err("%s: requested pg_i %d is out of mapped range\n",
253 __func__, pg_i);
254 return NULL;
255 }
256
257 return map_obj->pages[pg_i];
258}
259
260/*
261 * ======== proc_attach ========
262 * Purpose:
263 * Prepare for communication with a particular DSP processor, and return
264 * a handle to the processor object.
265 */
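/*
 * Usage sketch (hypothetical caller, not part of this file): attach to the
 * DSP identified by unit 0 through an existing process context, then detach
 * when finished. Error handling is omitted for brevity.
 *
 *	void *hprocessor;
 *
 *	if (!proc_attach(0, NULL, &hprocessor, pr_ctxt)) {
 *		... use hprocessor with the proc_* calls below ...
 *		proc_detach(pr_ctxt);
 *	}
 */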
266int
267proc_attach(u32 processor_id,
21aaf42e 268 const struct dsp_processorattrin *attr_in,
269 void **ph_processor, struct process_context *pr_ctxt)
270{
271 int status = 0;
272 struct dev_object *hdev_obj;
273 struct proc_object *p_proc_object = NULL;
274 struct mgr_object *hmgr_obj = NULL;
275 struct drv_object *hdrv_obj = NULL;
73b87a91 276 struct drv_data *drv_datap = dev_get_drvdata(bridge);
277 u8 dev_type;
278
279 if (pr_ctxt->processor) {
280 *ph_processor = pr_ctxt->processor;
281 return status;
282 }
283
284 /* Get the Driver and Manager Object Handles */
285 if (!drv_datap || !drv_datap->drv_object || !drv_datap->mgr_object) {
286 status = -ENODATA;
287 pr_err("%s: Failed to get object handles\n", __func__);
288 } else {
289 hdrv_obj = drv_datap->drv_object;
290 hmgr_obj = drv_datap->mgr_object;
291 }
7d55524d 292
a741ea6e 293 if (!status) {
294 /* Get the Device Object */
295 status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
296 }
a741ea6e 297 if (!status)
298 status = dev_get_dev_type(hdev_obj, &dev_type);
299
b66e0986 300 if (status)
301 goto func_end;
302
0142919c 303 /* If we made it this far, create the Processor object: */
304 p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
305 /* Fill out the Processor Object: */
306 if (p_proc_object == NULL) {
307 status = -ENOMEM;
308 goto func_end;
309 }
310 p_proc_object->dev_obj = hdev_obj;
311 p_proc_object->mgr_obj = hmgr_obj;
312 p_proc_object->processor_id = dev_type;
313 /* Store TGID instead of process handle */
314 p_proc_object->process = current->tgid;
315
316 INIT_LIST_HEAD(&p_proc_object->proc_list);
317
318 if (attr_in)
a534f17b 319 p_proc_object->timeout = attr_in->timeout;
7d55524d 320 else
a534f17b 321 p_proc_object->timeout = PROC_DFLT_TIMEOUT;
322
323 status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
a741ea6e 324 if (!status) {
7d55524d 325 status = dev_get_bridge_context(hdev_obj,
085467b8 326 &p_proc_object->bridge_context);
b66e0986 327 if (status)
328 kfree(p_proc_object);
329 } else
330 kfree(p_proc_object);
331
b66e0986 332 if (status)
333 goto func_end;
334
335 /* Create the Notification Object */
336 /* This is created with no event mask, no notify mask
337 * and no valid handle to the notification. They all get
338 * filled up when proc_register_notify is called */
339 p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
340 GFP_KERNEL);
341 if (p_proc_object->ntfy_obj)
342 ntfy_init(p_proc_object->ntfy_obj);
343 else
344 status = -ENOMEM;
345
a741ea6e 346 if (!status) {
347 /* Insert the Processor Object into the DEV List.
348 * Return handle to this Processor Object:
349 * Find out if the Device is already attached to a
350 * Processor. If so, return AlreadyAttached status */
085467b8 351 status = dev_insert_proc_object(p_proc_object->dev_obj,
352 (u32) p_proc_object,
353 &p_proc_object->
354 is_already_attached);
a741ea6e 355 if (!status) {
356 if (p_proc_object->is_already_attached)
357 status = 0;
358 } else {
359 if (p_proc_object->ntfy_obj) {
360 ntfy_delete(p_proc_object->ntfy_obj);
361 kfree(p_proc_object->ntfy_obj);
362 }
363
364 kfree(p_proc_object);
365 }
a741ea6e 366 if (!status) {
7d55524d 367 *ph_processor = (void *)p_proc_object;
a534f17b 368 pr_ctxt->processor = *ph_processor;
369 (void)proc_notify_clients(p_proc_object,
370 DSP_PROCESSORATTACH);
371 }
372 } else {
b66e0986 373 /* Don't leak memory if status is failed */
374 kfree(p_proc_object);
375 }
376func_end:
377 return status;
378}
379
380static int get_exec_file(struct cfg_devnode *dev_node_obj,
381 struct dev_object *hdev_obj,
b301c858 382 u32 size, char *exec_file)
383{
384 u8 dev_type;
385 s32 len;
315a1a20 386 struct drv_data *drv_datap = dev_get_drvdata(bridge);
387
388 dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
389
390 if (!exec_file)
391 return -EFAULT;
392
7d55524d 393 if (dev_type == DSP_UNIT) {
394 if (!drv_datap || !drv_datap->base_img)
395 return -EFAULT;
396
397 if (strlen(drv_datap->base_img) > size)
398 return -EINVAL;
399
400 strcpy(exec_file, drv_datap->base_img);
401 } else if (dev_type == IVA_UNIT && iva_img) {
402 len = strlen(iva_img);
403 strncpy(exec_file, iva_img, len + 1);
404 } else {
405 return -ENOENT;
7d55524d 406 }
407
408 return 0;
409}
410
411/*
412 * ======== proc_auto_start ========
413 * Purpose:
414 * A particular device gets loaded with the default image
415 * if the AutoStart flag is set.
416 * Parameters:
417 * hdev_obj: Handle to the Device
418 * Returns:
419 * 0: On Successful Loading
420 * -EPERM General Failure
421 * Requires:
422 * hdev_obj != NULL
423 * Ensures:
424 */
425int proc_auto_start(struct cfg_devnode *dev_node_obj,
426 struct dev_object *hdev_obj)
427{
428 int status = -EPERM;
429 struct proc_object *p_proc_object;
430 char sz_exec_file[MAXCMDLINELEN];
431 char *argv[2];
432 struct mgr_object *hmgr_obj = NULL;
73b87a91 433 struct drv_data *drv_datap = dev_get_drvdata(bridge);
434 u8 dev_type;
435
7d55524d 436 /* Create a Dummy PROC Object */
437 if (!drv_datap || !drv_datap->mgr_object) {
438 status = -ENODATA;
439 pr_err("%s: Failed to retrieve the object handle\n", __func__);
7d55524d 440 goto func_end;
441 } else {
442 hmgr_obj = drv_datap->mgr_object;
443 }
444
445 p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
446 if (p_proc_object == NULL) {
447 status = -ENOMEM;
448 goto func_end;
449 }
450 p_proc_object->dev_obj = hdev_obj;
451 p_proc_object->mgr_obj = hmgr_obj;
7d55524d 452 status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
a741ea6e 453 if (!status)
7d55524d 454 status = dev_get_bridge_context(hdev_obj,
085467b8 455 &p_proc_object->bridge_context);
b66e0986 456 if (status)
457 goto func_cont;
458
459 /* Stop the Device, put it into standby mode */
460 status = proc_stop(p_proc_object);
461
b66e0986 462 if (status)
463 goto func_cont;
464
465 /* Get the default executable for this board... */
466 dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
467 p_proc_object->processor_id = dev_type;
468 status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file),
469 sz_exec_file);
a741ea6e 470 if (!status) {
471 argv[0] = sz_exec_file;
472 argv[1] = NULL;
473 /* ...and try to load it: */
cd4f13c0 474 status = proc_load(p_proc_object, 1, (const char **)argv, NULL);
a741ea6e 475 if (!status)
476 status = proc_start(p_proc_object);
477 }
478 kfree(p_proc_object->last_coff);
479 p_proc_object->last_coff = NULL;
480func_cont:
481 kfree(p_proc_object);
482func_end:
483 return status;
484}
485
486/*
487 * ======== proc_ctrl ========
488 * Purpose:
489 * Pass control information to the GPP device driver managing the
490 * DSP processor.
491 *
492 * This will be an OEM-only function, and not part of the DSP/BIOS Bridge
493 * application developer's API.
494 * Call the bridge_dev_ctrl fxn with the Argument. This is a Synchronous
495 * Operation. arg can be null.
496 */
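/*
 * Illustrative call (hypothetical caller): request a deep sleep using the
 * driver's PWR_TIMEOUT. Only arg->cb_data is consulted by the sleep/wake
 * commands intercepted below.
 *
 *	struct dsp_cbdata arg = { .cb_data = PWR_TIMEOUT };
 *
 *	status = proc_ctrl(hprocessor, BRDIOCTL_DEEPSLEEP, &arg);
 */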
9d7d0a52 497int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata * arg)
498{
499 int status = 0;
500 struct proc_object *p_proc_object = hprocessor;
501 u32 timeout = 0;
502
503 if (p_proc_object) {
504 /* intercept PWR deep sleep command */
505 if (dw_cmd == BRDIOCTL_DEEPSLEEP) {
506 timeout = arg->cb_data;
507 status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
508 }
509 /* intercept PWR emergency sleep command */
510 else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) {
511 timeout = arg->cb_data;
512 status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP, timeout);
513 } else if (dw_cmd == PWR_DEEPSLEEP) {
514 /* timeout = arg->cb_data; */
515 status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
516 }
517 /* intercept PWR wake commands */
518 else if (dw_cmd == BRDIOCTL_WAKEUP) {
519 timeout = arg->cb_data;
520 status = pwr_wake_dsp(timeout);
521 } else if (dw_cmd == PWR_WAKEUP) {
522 /* timeout = arg->cb_data; */
523 status = pwr_wake_dsp(timeout);
524 } else
e17ba7f2 525 if (!((*p_proc_object->intf_fxns->dev_cntrl)
085467b8 526 (p_proc_object->bridge_context, dw_cmd,
527 arg))) {
528 status = 0;
529 } else {
530 status = -EPERM;
531 }
532 } else {
533 status = -EFAULT;
534 }
535
536 return status;
537}
538
539/*
540 * ======== proc_detach ========
541 * Purpose:
542 * Destroys the Processor Object. Removes the notification from the Dev
543 * List.
544 */
545int proc_detach(struct process_context *pr_ctxt)
546{
547 int status = 0;
548 struct proc_object *p_proc_object = NULL;
549
a534f17b 550 p_proc_object = (struct proc_object *)pr_ctxt->processor;
551
552 if (p_proc_object) {
553 /* Notify the Client */
554 ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH);
555 /* Remove the notification memory */
556 if (p_proc_object->ntfy_obj) {
557 ntfy_delete(p_proc_object->ntfy_obj);
558 kfree(p_proc_object->ntfy_obj);
559 }
560
561 kfree(p_proc_object->last_coff);
562 p_proc_object->last_coff = NULL;
7d55524d 563 /* Remove the Proc from the DEV List */
085467b8 564 (void)dev_remove_proc_object(p_proc_object->dev_obj,
565 (u32) p_proc_object);
566 /* Free the Processor Object */
567 kfree(p_proc_object);
a534f17b 568 pr_ctxt->processor = NULL;
569 } else {
570 status = -EFAULT;
571 }
572
573 return status;
574}
575
576/*
577 * ======== proc_enum_nodes ========
578 * Purpose:
579 * Enumerate and get configuration information about nodes allocated
580 * on a DSP processor.
581 */
582int proc_enum_nodes(void *hprocessor, void **node_tab,
583 u32 node_tab_size, u32 *pu_num_nodes,
584 u32 *pu_allocated)
585{
586 int status = -EPERM;
587 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
588 struct node_mgr *hnode_mgr = NULL;
589
7d55524d 590 if (p_proc_object) {
085467b8 591 if (!(dev_get_node_manager(p_proc_object->dev_obj,
592 &hnode_mgr))) {
593 if (hnode_mgr) {
594 status = node_enum_nodes(hnode_mgr, node_tab,
595 node_tab_size,
596 pu_num_nodes,
597 pu_allocated);
598 }
599 }
600 } else {
601 status = -EFAULT;
602 }
603
604 return status;
605}
606
607/* Cache operation against kernel addresses instead of user addresses */
608static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
609 ssize_t len, int pg_i)
610{
611 struct page *page;
612 unsigned long offset;
613 ssize_t rest;
614 int ret = 0, i = 0;
615 struct scatterlist *sg = map_obj->dma_info.sg;
616
617 while (len) {
618 page = get_mapping_page(map_obj, pg_i);
619 if (!page) {
620 pr_err("%s: no page for %08lx\n", __func__, start);
621 ret = -EINVAL;
622 goto out;
623 } else if (IS_ERR(page)) {
624 pr_err("%s: err page for %08lx(%lu)\n", __func__, start,
625 PTR_ERR(page));
626 ret = PTR_ERR(page);
627 goto out;
628 }
629
630 offset = start & ~PAGE_MASK;
631 rest = min_t(ssize_t, PAGE_SIZE - offset, len);
632
633 sg_set_page(&sg[i], page, rest, offset);
634
635 len -= rest;
636 start += rest;
637 pg_i++, i++;
638 }
639
640 if (i != map_obj->dma_info.num_pages) {
641 pr_err("%s: bad number of sg iterations\n", __func__);
642 ret = -EFAULT;
643 goto out;
644 }
645
646out:
647 return ret;
648}
649
650static int memory_regain_ownership(struct dmm_map_object *map_obj,
651 unsigned long start, ssize_t len, enum dma_data_direction dir)
652{
653 int ret = 0;
654 unsigned long first_data_page = start >> PAGE_SHIFT;
655 unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
656 /* calculating the number of pages this area spans */
657 unsigned long num_pages = last_data_page - first_data_page + 1;
658 struct bridge_dma_map_info *dma_info = &map_obj->dma_info;
659
660 if (!dma_info->sg)
661 goto out;
662
663 if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
664 pr_err("%s: dma info doesn't match given params\n", __func__);
665 return -EINVAL;
666 }
667
668 dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir);
669
670 pr_debug("%s: dma_map_sg unmapped\n", __func__);
671
672 kfree(dma_info->sg);
673
674 map_obj->dma_info.sg = NULL;
675
676out:
677 return ret;
678}
679
680/* Cache operation against kernel addresses instead of user addresses */
681static int memory_give_ownership(struct dmm_map_object *map_obj,
682 unsigned long start, ssize_t len, enum dma_data_direction dir)
683{
684 int pg_i, ret, sg_num;
685 struct scatterlist *sg;
686 unsigned long first_data_page = start >> PAGE_SHIFT;
687 unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
688 /* calculating the number of pages this area spans */
689 unsigned long num_pages = last_data_page - first_data_page + 1;
690
691 pg_i = find_first_page_in_cache(map_obj, start);
692 if (pg_i < 0) {
693 pr_err("%s: failed to find first page in cache\n", __func__);
694 ret = -EINVAL;
695 goto out;
696 }
697
698 sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
699 if (!sg) {
700 pr_err("%s: kcalloc failed\n", __func__);
701 ret = -ENOMEM;
702 goto out;
703 }
704
705 sg_init_table(sg, num_pages);
706
707 /* cleanup a previous sg allocation */
708 /* this may happen if the application doesn't signal end-of-DMA */
709 kfree(map_obj->dma_info.sg);
710
711 map_obj->dma_info.sg = sg;
712 map_obj->dma_info.dir = dir;
713 map_obj->dma_info.num_pages = num_pages;
714
715 ret = build_dma_sg(map_obj, start, len, pg_i);
716 if (ret)
717 goto kfree_sg;
718
719 sg_num = dma_map_sg(bridge, sg, num_pages, dir);
720 if (sg_num < 1) {
721 pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
722 ret = -EFAULT;
723 goto kfree_sg;
724 }
725
726 pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
727 map_obj->dma_info.sg_num = sg_num;
728
729 return 0;
730
731kfree_sg:
732 kfree(sg);
733 map_obj->dma_info.sg = NULL;
734out:
735 return ret;
736}
737
738int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
739 enum dma_data_direction dir)
740{
741 /* Keep STATUS here for future additions to this function */
742 int status = 0;
743 struct process_context *pr_ctxt = (struct process_context *) hprocessor;
744 struct dmm_map_object *map_obj;
745
746 if (!pr_ctxt) {
747 status = -EFAULT;
748 goto err_out;
749 }
750
751 pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
752 (u32)pmpu_addr,
753 ul_size, dir);
754
755 mutex_lock(&proc_lock);
756
757 /* find requested memory area in cached mapping information */
758 map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
759 if (!map_obj) {
760 pr_err("%s: find_containing_mapping failed\n", __func__);
761 status = -EFAULT;
ab42abf3 762 goto no_map;
763 }
764
765 if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
766 pr_err("%s: Invalid address parameters %p %x\n",
767 __func__, pmpu_addr, ul_size);
768 status = -EFAULT;
769 }
770
771no_map:
772 mutex_unlock(&proc_lock);
773err_out:
774
775 return status;
776}
777
778int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
779 enum dma_data_direction dir)
780{
781 /* Keep STATUS here for future additions to this function */
782 int status = 0;
783 struct process_context *pr_ctxt = (struct process_context *) hprocessor;
784 struct dmm_map_object *map_obj;
785
786 if (!pr_ctxt) {
787 status = -EFAULT;
788 goto err_out;
789 }
790
791 pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
792 (u32)pmpu_addr,
793 ul_size, dir);
794
795 mutex_lock(&proc_lock);
796
797 /* find requested memory area in cached mapping information */
798 map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
799 if (!map_obj) {
800 pr_err("%s: find_containing_mapping failed\n", __func__);
801 status = -EFAULT;
ab42abf3 802 goto no_map;
803 }
804
805 if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
806 pr_err("%s: Invalid address parameters %p %x\n",
807 __func__, pmpu_addr, ul_size);
808 status = -EFAULT;
809 }
810
811no_map:
812 mutex_unlock(&proc_lock);
813err_out:
814 return status;
815}
816
817/*
818 * ======== proc_flush_memory ========
819 * Purpose:
820 * Flush cache
821 */
822int proc_flush_memory(void *hprocessor, void *pmpu_addr,
823 u32 ul_size, u32 ul_flags)
824{
825 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
826
827 return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir);
828}
829
830/*
831 * ======== proc_invalidate_memory ========
832 * Purpose:
833 * Invalidates the memory specified
834 */
835int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size)
836{
837 enum dma_data_direction dir = DMA_FROM_DEVICE;
838
839 return proc_begin_dma(hprocessor, pmpu_addr, size, dir);
840}
841
842/*
843 * ======== proc_get_resource_info ========
844 * Purpose:
845 * Enumerate the resources currently available on a processor.
846 */
847int proc_get_resource_info(void *hprocessor, u32 resource_type,
e6bf74f0 848 struct dsp_resourceinfo *resource_info,
849 u32 resource_info_size)
850{
851 int status = -EPERM;
852 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
853 struct node_mgr *hnode_mgr = NULL;
854 struct nldr_object *nldr_obj = NULL;
855 struct rmm_target_obj *rmm = NULL;
856 struct io_mgr *hio_mgr = NULL; /* IO manager handle */
857
858 if (!p_proc_object) {
859 status = -EFAULT;
860 goto func_end;
861 }
862 switch (resource_type) {
863 case DSP_RESOURCE_DYNDARAM:
864 case DSP_RESOURCE_DYNSARAM:
865 case DSP_RESOURCE_DYNEXTERNAL:
866 case DSP_RESOURCE_DYNSRAM:
085467b8 867 status = dev_get_node_manager(p_proc_object->dev_obj,
868 &hnode_mgr);
869 if (!hnode_mgr) {
870 status = -EFAULT;
871 goto func_end;
872 }
873
874 status = node_get_nldr_obj(hnode_mgr, &nldr_obj);
a741ea6e 875 if (!status) {
876 status = nldr_get_rmm_manager(nldr_obj, &rmm);
877 if (rmm) {
878 if (!rmm_stat(rmm,
879 (enum dsp_memtype)resource_type,
880 (struct dsp_memstat *)
881 &(resource_info->result.
882 mem_stat)))
883 status = -EINVAL;
884 } else {
885 status = -EFAULT;
886 }
887 }
888 break;
889 case DSP_RESOURCE_PROCLOAD:
085467b8 890 status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
891 if (hio_mgr)
892 status =
893 p_proc_object->intf_fxns->
09f13304 894 io_get_proc_load(hio_mgr,
895 (struct dsp_procloadstat *)
896 &(resource_info->result.
897 proc_load_stat));
898 else
899 status = -EFAULT;
900 break;
901 default:
902 status = -EPERM;
903 break;
904 }
905func_end:
906 return status;
907}
908
909/*
910 * ======== proc_get_dev_object ========
911 * Purpose:
912 * Return the Dev Object handle for a given Processor.
913 *
914 */
915int proc_get_dev_object(void *hprocessor,
e436d07d 916 struct dev_object **device_obj)
917{
918 int status = -EPERM;
919 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
920
7d55524d 921 if (p_proc_object) {
085467b8 922 *device_obj = p_proc_object->dev_obj;
923 status = 0;
924 } else {
e436d07d 925 *device_obj = NULL;
926 status = -EFAULT;
927 }
928
929 return status;
930}
931
932/*
933 * ======== proc_get_state ========
934 * Purpose:
935 * Report the state of the specified DSP processor.
936 */
937int proc_get_state(void *hprocessor,
e6bf74f0 938 struct dsp_processorstate *proc_state_obj,
939 u32 state_info_size)
940{
941 int status = 0;
942 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
943 int brd_status;
7d55524d 944
945 if (p_proc_object) {
946 /* First, retrieve BRD state information */
e17ba7f2 947 status = (*p_proc_object->intf_fxns->brd_status)
085467b8 948 (p_proc_object->bridge_context, &brd_status);
a741ea6e 949 if (!status) {
950 switch (brd_status) {
951 case BRD_STOPPED:
952 proc_state_obj->proc_state = PROC_STOPPED;
953 break;
954 case BRD_SLEEP_TRANSITION:
955 case BRD_DSP_HIBERNATION:
956 /* Fall through */
957 case BRD_RUNNING:
958 proc_state_obj->proc_state = PROC_RUNNING;
959 break;
960 case BRD_LOADED:
961 proc_state_obj->proc_state = PROC_LOADED;
962 break;
963 case BRD_ERROR:
964 proc_state_obj->proc_state = PROC_ERROR;
965 break;
966 default:
967 proc_state_obj->proc_state = 0xFF;
968 status = -EPERM;
969 break;
970 }
971 }
972 } else {
973 status = -EFAULT;
974 }
975 dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n",
976 __func__, status, proc_state_obj->proc_state);
977 return status;
978}
979
980/*
981 * ======== proc_get_trace ========
982 * Purpose:
983 * Retrieve the current contents of the trace buffer, located on the
984 * Processor. Predefined symbols for the trace buffer must have been
985 * configured into the DSP executable.
986 * Details:
987 * Only the symbols SYS_PUTCBEG and SYS_PUTCEND are supported for defining
988 * a trace buffer. Treat it as an undocumented feature.
989 * This call is destructive, meaning the processor is placed in the monitor
990 * state as a result of this function.
991 */
992int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size)
993{
994 int status;
995 status = -ENOSYS;
996 return status;
997}
998
999/*
1000 * ======== proc_load ========
1001 * Purpose:
1002 * Reset a processor and load a new base program image.
1003 * This will be an OEM-only function, and not part of the DSP/BIOS Bridge
1004 * application developer's API.
1005 */
1006int proc_load(void *hprocessor, const s32 argc_index,
1007 const char **user_args, const char **user_envp)
1008{
1009 int status = 0;
1010 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1011 struct io_mgr *hio_mgr; /* IO manager handle */
1012 struct msg_mgr *hmsg_mgr;
1013 struct cod_manager *cod_mgr; /* Code manager handle */
1014 char *pargv0; /* temp argv[0] ptr */
1015 char **new_envp; /* Updated envp[] array. */
1016 char sz_proc_id[MAXPROCIDLEN]; /* Buffer for "PROC_ID=<n>" */
1017 s32 envp_elems; /* Num elements in envp[]. */
1018 s32 cnew_envp; /* Num elements in new_envp[]. */
1019 s32 nproc_id = 0; /* Anticipate MP version. */
1020 struct dcd_manager *hdcd_handle;
677f2ded 1021 struct dmm_object *dmm_mgr;
1022 u32 dw_ext_end;
1023 u32 proc_id;
1024 int brd_state;
1025 struct drv_data *drv_datap = dev_get_drvdata(bridge);
1026
1027#ifdef OPT_LOAD_TIME_INSTRUMENTATION
1028 struct timeval tv1;
1029 struct timeval tv2;
1030#endif
1031
b3d23688 1032#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1033 struct dspbridge_platform_data *pdata =
1034 omap_dspbridge_dev->dev.platform_data;
1035#endif
1036
1037#ifdef OPT_LOAD_TIME_INSTRUMENTATION
1038 do_gettimeofday(&tv1);
1039#endif
1040 if (!p_proc_object) {
1041 status = -EFAULT;
1042 goto func_end;
1043 }
085467b8 1044 dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
1045 if (!cod_mgr) {
1046 status = -EPERM;
1047 goto func_end;
1048 }
1049 status = proc_stop(hprocessor);
b66e0986 1050 if (status)
1051 goto func_end;
1052
1053 /* Place the board in the monitor state. */
1054 status = proc_monitor(hprocessor);
b66e0986 1055 if (status)
1056 goto func_end;
1057
1058 /* Save ptr to original argv[0]. */
1059 pargv0 = (char *)user_args[0];
1060 /* Prepend "PROC_ID=<nproc_id>" to envp array for target. */
1061 envp_elems = get_envp_count((char **)user_envp);
1062 cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2));
1063 new_envp = kzalloc(cnew_envp * sizeof(char **), GFP_KERNEL);
1064 if (new_envp) {
1065 status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID,
1066 nproc_id);
1067 if (status >= MAXPROCIDLEN) {
1068 dev_dbg(bridge, "%s: Proc ID string overflow\n",
1069 __func__);
1070 status = -EPERM;
1071 } else {
1072 new_envp =
1073 prepend_envp(new_envp, (char **)user_envp,
1074 envp_elems, cnew_envp, sz_proc_id);
1075 /* Get the DCD Handle */
085467b8 1076 status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
7d55524d 1077 (u32 *) &hdcd_handle);
a741ea6e 1078 if (!status) {
1079 /* Before proceeding with new load,
1080 * check if a previously registered COFF
1081 * exists.
1082 * If yes, unregister nodes in previously
1083 * registered COFF. If any error occurred,
1084 * set previously registered COFF to NULL. */
085467b8 1085 if (p_proc_object->last_coff != NULL) {
1086 status =
1087 dcd_auto_unregister(hdcd_handle,
1088 p_proc_object->
085467b8 1089 last_coff);
1090 /* Regardless of auto unregister status,
1091 * free previously allocated
1092 * memory. */
1093 kfree(p_proc_object->last_coff);
1094 p_proc_object->last_coff = NULL;
1095 }
1096 }
1097 /* On success, do cod_open_base() */
1098 status = cod_open_base(cod_mgr, (char *)user_args[0],
1099 COD_SYMB);
1100 }
1101 } else {
1102 status = -ENOMEM;
1103 }
a741ea6e 1104 if (!status) {
1105 /* Auto-register data base */
1106 /* Get the DCD Handle */
085467b8 1107 status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
7d55524d 1108 (u32 *) &hdcd_handle);
a741ea6e 1109 if (!status) {
1110 /* Auto register nodes in specified COFF
1111 * file. If registration did not fail,
1112 * (status = 0 or -EACCES)
1113 * save the name of the COFF file for
1114 * de-registration in the future. */
1115 status =
1116 dcd_auto_register(hdcd_handle,
1117 (char *)user_args[0]);
1118 if (status == -EACCES)
1119 status = 0;
1120
b66e0986 1121 if (status) {
1122 status = -EPERM;
1123 } else {
7d55524d 1124 /* Allocate memory for pszLastCoff */
085467b8 1125 p_proc_object->last_coff =
1126 kzalloc((strlen(user_args[0]) +
1127 1), GFP_KERNEL);
1128 /* If memory allocated, save COFF file name */
1129 if (p_proc_object->last_coff) {
1130 strncpy(p_proc_object->last_coff,
1131 (char *)user_args[0],
1132 (strlen((char *)user_args[0]) +
1133 1));
1134 }
1135 }
1136 }
1137 }
1138 /* Update shared memory address and size */
a741ea6e 1139 if (!status) {
1140 /* Create the message manager. This must be done
1141 * before calling the IOOnLoaded function. */
085467b8 1142 dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
7d55524d 1143 if (!hmsg_mgr) {
085467b8 1144 status = msg_create(&hmsg_mgr, p_proc_object->dev_obj,
7d55524d 1145 (msg_onexit) node_on_exit);
085467b8 1146 dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr);
1147 }
1148 }
a741ea6e 1149 if (!status) {
7d55524d 1150 /* Set the Device object's message manager */
085467b8 1151 status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
7d55524d 1152 if (hio_mgr)
09f13304 1153 status = (*p_proc_object->intf_fxns->io_on_loaded)
1154 (hio_mgr);
1155 else
1156 status = -EFAULT;
1157 }
a741ea6e 1158 if (!status) {
1159 /* Now, attempt to load an exec: */
1160
1161 /* Boost the OPP level to Maximum level supported by baseport */
b3d23688 1162#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1163 if (pdata->cpu_set_freq)
1164 (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]);
1165#endif
1166 status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
1167 dev_brd_write_fxn,
085467b8 1168 p_proc_object->dev_obj, NULL);
b66e0986 1169 if (status) {
1170 if (status == -EBADF) {
1171 dev_dbg(bridge, "%s: Failure to Load the EXE\n",
1172 __func__);
1173 }
1174 if (status == -ESPIPE) {
1175 pr_err("%s: Couldn't parse the file\n",
1176 __func__);
1177 }
1178 }
1179 /* Requesting the lowest opp supported */
b3d23688 1180#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1181 if (pdata->cpu_set_freq)
1182 (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
1183#endif
1184
1185 }
a741ea6e 1186 if (!status) {
7d55524d 1187 /* Update the Processor status to loaded */
e17ba7f2 1188 status = (*p_proc_object->intf_fxns->brd_set_state)
085467b8 1189 (p_proc_object->bridge_context, BRD_LOADED);
a741ea6e 1190 if (!status) {
1191 p_proc_object->proc_state = PROC_LOADED;
1192 if (p_proc_object->ntfy_obj)
1193 proc_notify_clients(p_proc_object,
1194 DSP_PROCESSORSTATECHANGE);
1195 }
1196 }
a741ea6e 1197 if (!status) {
1198 status = proc_get_processor_id(hprocessor, &proc_id);
1199 if (proc_id == DSP_UNIT) {
1200 /* Use all available DSP address space after EXTMEM
1201 * for DMM */
a741ea6e 1202 if (!status)
1203 status = cod_get_sym_value(cod_mgr, EXTEND,
1204 &dw_ext_end);
1205
1206 /* Reset DMM structs and add an initial free chunk */
1207 if (!status) {
1208 status =
085467b8 1209 dev_get_dmm_mgr(p_proc_object->dev_obj,
1210 &dmm_mgr);
1211 if (dmm_mgr) {
1212 /* Set dw_ext_end to DMM START u8
1213 * address */
1214 dw_ext_end =
1215 (dw_ext_end + 1) * DSPWORDSIZE;
1216 /* DMM memory is from EXT_END */
1217 status = dmm_create_tables(dmm_mgr,
1218 dw_ext_end,
1219 DMMPOOLSIZE);
1220 } else {
1221 status = -EFAULT;
1222 }
1223 }
1224 }
1225 }
1226 /* Restore the original argv[0] */
1227 kfree(new_envp);
1228 user_args[0] = pargv0;
a741ea6e 1229 if (!status) {
e17ba7f2 1230 if (!((*p_proc_object->intf_fxns->brd_status)
085467b8 1231 (p_proc_object->bridge_context, &brd_state))) {
1232 pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
1233 kfree(drv_datap->base_img);
1234 drv_datap->base_img = kmalloc(strlen(pargv0) + 1,
1235 GFP_KERNEL);
1236 if (drv_datap->base_img)
1237 strncpy(drv_datap->base_img, pargv0,
1238 strlen(pargv0) + 1);
1239 else
1240 status = -ENOMEM;
1241 }
1242 }
1243
1244func_end:
cfccf244 1245 if (status) {
7d55524d 1246 pr_err("%s: Processor failed to load\n", __func__);
1247 proc_stop(p_proc_object);
1248 }
1249#ifdef OPT_LOAD_TIME_INSTRUMENTATION
1250 do_gettimeofday(&tv2);
1251 if (tv2.tv_usec < tv1.tv_usec) {
1252 tv2.tv_usec += 1000000;
1253 tv2.tv_sec--;
1254 }
1255 dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__,
1256 tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec);
1257#endif
1258 return status;
1259}
1260
1261/*
1262 * ======== proc_map ========
1263 * Purpose:
1264 * Maps a MPU buffer to DSP address space.
1265 */
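/*
 * Worked example (illustrative numbers only): for pmpu_addr = 0x40001234 and
 * ul_size = 0x100, the alignment code below yields pa_align = 0x40001000 and
 * size_align = 0x1000 (the containing 4 KiB page), and the returned DSP
 * address keeps the low bits of the MPU address:
 *
 *	*pp_map_addr = va_align | (0x40001234 & (PG_SIZE4K - 1));
 */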
1266int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1267 void *req_addr, void **pp_map_addr, u32 ul_map_attr,
1268 struct process_context *pr_ctxt)
1269{
1270 u32 va_align;
1271 u32 pa_align;
2fa28a51 1272 struct dmm_object *dmm_mgr;
1273 u32 size_align;
1274 int status = 0;
1275 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1276 struct dmm_map_object *map_obj;
d0b345f3 1277 u32 tmp_addr = 0;
7d55524d 1278
b3d23688 1279#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
1280 if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
1281 if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) ||
1282 !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) {
1283 pr_err("%s: not aligned: 0x%x (%d)\n", __func__,
1284 (u32)pmpu_addr, ul_size);
1285 return -EFAULT;
1286 }
1287 }
1288#endif
1289
1290 /* Calculate the page-aligned PA, VA and size */
1291 va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K);
1292 pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K);
1293 size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align,
1294 PG_SIZE4K);
1295
1296 if (!p_proc_object) {
1297 status = -EFAULT;
1298 goto func_end;
1299 }
1300 /* Critical section */
1301 mutex_lock(&proc_lock);
1302 dmm_get_handle(p_proc_object, &dmm_mgr);
1303 if (dmm_mgr)
1304 status = dmm_map_memory(dmm_mgr, va_align, size_align);
1305 else
1306 status = -EFAULT;
1307
1308 /* Add mapping to the page tables. */
a741ea6e 1309 if (!status) {
1310
1311 /* Mapped address = MSB of VA | LSB of PA */
1312 tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
7d55524d 1313 /* mapped memory resource tracking */
d0b345f3 1314 map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
7d55524d 1315 size_align);
d0b345f3 1316 if (!map_obj)
7d55524d 1317 status = -ENOMEM;
d0b345f3 1318 else
3c882de5 1319 status = (*p_proc_object->intf_fxns->brd_mem_map)
085467b8 1320 (p_proc_object->bridge_context, pa_align, va_align,
d0b345f3 1321 size_align, ul_map_attr, map_obj->pages);
7d55524d 1322 }
a741ea6e 1323 if (!status) {
7d55524d 1324 /* Mapped address = MSB of VA | LSB of PA */
d0b345f3 1325 *pp_map_addr = (void *) tmp_addr;
7d55524d 1326 } else {
d0b345f3 1327 remove_mapping_information(pr_ctxt, tmp_addr, size_align);
2fa28a51 1328 dmm_un_map_memory(dmm_mgr, va_align, &size_align);
1329 }
1330 mutex_unlock(&proc_lock);
1331
b66e0986 1332 if (status)
1333 goto func_end;
1334
1335func_end:
1336 dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, "
1337 "req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, "
1338 "pa_align %x, size_align %x status 0x%x\n", __func__,
1339 hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr,
1340 pp_map_addr, va_align, pa_align, size_align, status);
1341
1342 return status;
1343}
1344
1345/*
1346 * ======== proc_register_notify ========
1347 * Purpose:
1348 * Register to be notified of specific processor events.
1349 */
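/*
 * Registration sketch (hypothetical caller): ask to be signalled on MMU
 * faults; calling again later with event_mask == 0 deregisters the same
 * notification, as described in the special-case handling below.
 *
 *	status = proc_register_notify(hprocessor, DSP_MMUFAULT,
 *				      DSP_SIGNALEVENT, &notification);
 */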
1350int proc_register_notify(void *hprocessor, u32 event_mask,
1351 u32 notify_type, struct dsp_notification
1352 * hnotification)
1353{
1354 int status = 0;
1355 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1356 struct deh_mgr *hdeh_mgr;
1357
1358 /* Check processor handle */
1359 if (!p_proc_object) {
1360 status = -EFAULT;
1361 goto func_end;
1362 }
1363 /* Check if event mask is a valid processor related event */
1364 if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH |
1365 DSP_PROCESSORDETACH | DSP_PROCESSORRESTART |
1366 DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR |
1367 DSP_WDTOVERFLOW))
1368 status = -EINVAL;
1369
1370 /* Check if notify type is valid */
1371 if (notify_type != DSP_SIGNALEVENT)
1372 status = -EINVAL;
1373
a741ea6e 1374 if (!status) {
1375 /* If event mask is not DSP_SYSERROR, DSP_MMUFAULT,
1376 * or DSP_PWRERROR then register event immediately. */
1377 if (event_mask &
1378 ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR |
1379 DSP_WDTOVERFLOW)) {
1380 status = ntfy_register(p_proc_object->ntfy_obj,
1381 hnotification, event_mask,
1382 notify_type);
1383 /* Special case alert, special case alert!
1384 * If we're trying to *deregister* (i.e. event_mask
1385 * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification,
1386 * we have to deregister with the DEH manager.
1387 * There's no way to know, based on event_mask which
1388 * manager the notification event was registered with,
1389 * so if we're trying to deregister and ntfy_register
1390 * failed, we'll give the deh manager a shot.
1391 */
b66e0986 1392 if ((event_mask == 0) && status) {
7d55524d 1393 status =
085467b8 1394 dev_get_deh_mgr(p_proc_object->dev_obj,
7d55524d 1395 &hdeh_mgr);
7d55524d 1396 status =
1397 bridge_deh_register_notify(hdeh_mgr,
1398 event_mask,
1399 notify_type,
1400 hnotification);
1401 }
1402 } else {
085467b8 1403 status = dev_get_deh_mgr(p_proc_object->dev_obj,
7d55524d 1404 &hdeh_mgr);
7d55524d 1405 status =
1406 bridge_deh_register_notify(hdeh_mgr,
1407 event_mask,
1408 notify_type,
1409 hnotification);
1410
1411 }
1412 }
1413func_end:
1414 return status;
1415}
1416
1417/*
1418 * ======== proc_reserve_memory ========
1419 * Purpose:
1420 * Reserve a virtually contiguous region of DSP address space.
1421 */
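/*
 * Typical lifecycle sketch (hypothetical caller, error handling omitted):
 * reserve DSP virtual space, map an MPU buffer into it, and tear both down
 * in reverse order when done.
 *
 *	void *rsv_addr, *map_addr;
 *
 *	proc_reserve_memory(hprocessor, size, &rsv_addr, pr_ctxt);
 *	proc_map(hprocessor, buf, size, rsv_addr, &map_addr, 0, pr_ctxt);
 *	...
 *	proc_un_map(hprocessor, map_addr, pr_ctxt);
 *	proc_un_reserve_memory(hprocessor, rsv_addr, pr_ctxt);
 */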
1422int proc_reserve_memory(void *hprocessor, u32 ul_size,
1423 void **pp_rsv_addr,
1424 struct process_context *pr_ctxt)
1425{
1426 struct dmm_object *dmm_mgr;
1427 int status = 0;
1428 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
a2890350 1429 struct dmm_rsv_object *rsv_obj;
1430
1431 if (!p_proc_object) {
1432 status = -EFAULT;
1433 goto func_end;
1434 }
1435
1436 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1437 if (!dmm_mgr) {
1438 status = -EFAULT;
1439 goto func_end;
1440 }
1441
1442 status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
1443 if (status != 0)
1444 goto func_end;
1445
1446 /*
1447 * A successful reserve should be followed by insertion of rsv_obj
1448 * into dmm_rsv_list, so that reserved memory resource tracking
1449 * remains uptodate
1450 */
1451 rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
1452 if (rsv_obj) {
1453 rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
1454 spin_lock(&pr_ctxt->dmm_rsv_lock);
1455 list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
1456 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1457 }
1458
1459func_end:
1460 dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
1461 "status 0x%x\n", __func__, hprocessor,
1462 ul_size, pp_rsv_addr, status);
1463 return status;
1464}
1465
1466/*
1467 * ======== proc_start ========
1468 * Purpose:
1469 * Start a processor running.
1470 */
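/*
 * Ordering sketch (hypothetical caller, mirroring proc_auto_start() above):
 * an image must be loaded before the board can be started, which is what the
 * PROC_LOADED check below enforces. The file name is illustrative.
 *
 *	const char *argv[2] = { "baseimage.dof", NULL };
 *
 *	if (!proc_load(hprocessor, 1, argv, NULL))
 *		status = proc_start(hprocessor);
 */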
1471int proc_start(void *hprocessor)
1472{
1473 int status = 0;
1474 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1475 struct cod_manager *cod_mgr; /* Code manager handle */
1476 u32 dw_dsp_addr; /* Loaded code's entry point. */
1477 int brd_state;
1478
1479 if (!p_proc_object) {
1480 status = -EFAULT;
1481 goto func_end;
1482 }
1483 /* Call the bridge_brd_start */
1484 if (p_proc_object->proc_state != PROC_LOADED) {
1485 status = -EBADR;
1486 goto func_end;
1487 }
085467b8 1488 status = dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
1489 if (!cod_mgr) {
1490 status = -EFAULT;
1491 goto func_cont;
1492 }
1493
1494 status = cod_get_entry(cod_mgr, &dw_dsp_addr);
b66e0986 1495 if (status)
1496 goto func_cont;
1497
e17ba7f2 1498 status = (*p_proc_object->intf_fxns->brd_start)
085467b8 1499 (p_proc_object->bridge_context, dw_dsp_addr);
b66e0986 1500 if (status)
1501 goto func_cont;
1502
1503 /* Call dev_create2 */
085467b8 1504 status = dev_create2(p_proc_object->dev_obj);
a741ea6e 1505 if (!status) {
1506 p_proc_object->proc_state = PROC_RUNNING;
1507 /* Deep sleep switches off the peripheral clocks.
1508 * We just put the DSP CPU in idle in the idle loop,
1509 * so there is no need to send a command to the DSP */
1510
1511 if (p_proc_object->ntfy_obj) {
1512 proc_notify_clients(p_proc_object,
1513 DSP_PROCESSORSTATECHANGE);
1514 }
1515 } else {
1516 /* Failed to Create Node Manager and DISP Object
1517 * Stop the Processor from running. Put it in STOPPED State */
1518 (void)(*p_proc_object->intf_fxns->
085467b8 1519 brd_stop) (p_proc_object->bridge_context);
1520 p_proc_object->proc_state = PROC_STOPPED;
1521 }
1522func_cont:
a741ea6e 1523 if (!status) {
e17ba7f2 1524 if (!((*p_proc_object->intf_fxns->brd_status)
085467b8 1525 (p_proc_object->bridge_context, &brd_state))) {
7d55524d 1526 pr_info("%s: dsp in running state\n", __func__);
1527 }
1528 } else {
1529 pr_err("%s: Failed to start the dsp\n", __func__);
cfccf244 1530 proc_stop(p_proc_object);
1531 }
1532
1533func_end:
1534 return status;
1535}
1536
1537/*
1538 * ======== proc_stop ========
1539 * Purpose:
1540 * Stop a processor running.
1541 */
1542int proc_stop(void *hprocessor)
1543{
1544 int status = 0;
1545 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1546 struct msg_mgr *hmsg_mgr;
1547 struct node_mgr *hnode_mgr;
1548 void *hnode;
1549 u32 node_tab_size = 1;
1550 u32 num_nodes = 0;
1551 u32 nodes_allocated = 0;
7d55524d 1552
1553 if (!p_proc_object) {
1554 status = -EFAULT;
1555 goto func_end;
1556 }
7d55524d 1557 /* check if there are any running nodes */
085467b8 1558 status = dev_get_node_manager(p_proc_object->dev_obj, &hnode_mgr);
a741ea6e 1559 if (!status && hnode_mgr) {
1560 status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
1561 &num_nodes, &nodes_allocated);
1562 if ((status == -EINVAL) || (nodes_allocated > 0)) {
1563 pr_err("%s: Can't stop device, active nodes = %d \n",
1564 __func__, nodes_allocated);
1565 return -EBADR;
1566 }
1567 }
1568 /* Call the bridge_brd_stop */
1569 /* It is OK to stop a device that doesn't have nodes OR was not started */
1570 status =
1571 (*p_proc_object->intf_fxns->
085467b8 1572 brd_stop) (p_proc_object->bridge_context);
a741ea6e 1573 if (!status) {
1574 dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
1575 p_proc_object->proc_state = PROC_STOPPED;
25985edc 1576 /* Destroy the Node Manager, msg_ctrl Manager */
085467b8 1577 if (!(dev_destroy2(p_proc_object->dev_obj))) {
7d55524d 1578 /* Destroy the msg_ctrl by calling msg_delete */
085467b8 1579 dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
1580 if (hmsg_mgr) {
1581 msg_delete(hmsg_mgr);
085467b8 1582 dev_set_msg_mgr(p_proc_object->dev_obj, NULL);
7d55524d 1583 }
1584 }
1585 } else {
1586 pr_err("%s: Failed to stop the processor\n", __func__);
1587 }
1588func_end:
1589
1590 return status;
1591}
1592
1593/*
1594 * ======== proc_un_map ========
1595 * Purpose:
1596 * Removes a MPU buffer mapping from the DSP address space.
1597 */
1598int proc_un_map(void *hprocessor, void *map_addr,
1599 struct process_context *pr_ctxt)
1600{
1601 int status = 0;
1602 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
2fa28a51 1603 struct dmm_object *dmm_mgr;
7d55524d 1604 u32 va_align;
677f2ded 1605 u32 size_align;
1606
1607 va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
1608 if (!p_proc_object) {
1609 status = -EFAULT;
1610 goto func_end;
1611 }
1612
1613 status = dmm_get_handle(hprocessor, &dmm_mgr);
1614 if (!dmm_mgr) {
1615 status = -EFAULT;
1616 goto func_end;
1617 }
1618
1619 /* Critical section */
1620 mutex_lock(&proc_lock);
1621 /*
1622 * Update DMM structures. Get the size to unmap.
1623 * This function returns error if the VA is not mapped
1624 */
1625 status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
7d55524d 1626 /* Remove mapping from the page tables. */
d0b345f3 1627 if (!status) {
3c882de5 1628 status = (*p_proc_object->intf_fxns->brd_mem_un_map)
085467b8 1629 (p_proc_object->bridge_context, va_align, size_align);
d0b345f3 1630 }
7d55524d 1631
b66e0986 1632 if (status)
ab42abf3 1633 goto unmap_failed;
1634
1635 /*
1636 * A successful unmap should be followed by removal of map_obj
1637 * from dmm_map_list, so that mapped memory resource tracking
1638 * remains uptodate
1639 */
2fa28a51 1640 remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
7d55524d 1641
1642unmap_failed:
1643 mutex_unlock(&proc_lock);
1644
1645func_end:
1646 dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
1647 __func__, hprocessor, map_addr, status);
1648 return status;
1649}
1650
1651/*
1652 * ======== proc_un_reserve_memory ========
1653 * Purpose:
1654 * Frees a previously reserved region of DSP address space.
1655 */
1656int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
1657 struct process_context *pr_ctxt)
1658{
1659 struct dmm_object *dmm_mgr;
1660 int status = 0;
1661 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
a2890350 1662 struct dmm_rsv_object *rsv_obj;
1663
1664 if (!p_proc_object) {
1665 status = -EFAULT;
1666 goto func_end;
1667 }
1668
1669 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1670 if (!dmm_mgr) {
1671 status = -EFAULT;
1672 goto func_end;
1673 }
1674
1675 status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
1676 if (status != 0)
1677 goto func_end;
1678
1679 /*
1680 * A successful unreserve should be followed by removal of rsv_obj
1681 * from dmm_rsv_list, so that reserved memory resource tracking
1682 * remains uptodate
1683 */
1684 spin_lock(&pr_ctxt->dmm_rsv_lock);
1685 list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
1686 if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
1687 list_del(&rsv_obj->link);
1688 kfree(rsv_obj);
1689 break;
1690 }
1691 }
1692 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1693
1694func_end:
1695 dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
1696 __func__, hprocessor, prsv_addr, status);
1697 return status;
1698}
1699
1700/*
1701 * ======== proc_monitor ========
1702 * Purpose:
1703 * Place the Processor in Monitor State. This is an internal
1704 * function and a requirement before Processor is loaded.
1705 * This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor.
1706 * In dev_destroy2 we delete the node manager.
1707 * Parameters:
1708 * p_proc_object: Pointer to Processor Object
1709 * Returns:
1710 * 0: Processor placed in monitor mode.
1711 * !0: Failed to place processor in monitor mode.
1712 * Requires:
1713 * Valid Processor Handle
1714 * Ensures:
1715 * Success: ProcObject state is PROC_IDLE
1716 */
c8c1ad8c 1717static int proc_monitor(struct proc_object *proc_obj)
1718{
1719 int status = -EPERM;
1720 struct msg_mgr *hmsg_mgr;
7d55524d 1721
1722 /* This is needed only when the Device is loaded while it is
1723 * already 'ACTIVE' */
25985edc 1724 /* Destroy the Node Manager, msg_ctrl Manager */
085467b8 1725 if (!dev_destroy2(proc_obj->dev_obj)) {
7d55524d 1726 /* Destroy the msg_ctrl by calling msg_delete */
085467b8 1727 dev_get_msg_mgr(proc_obj->dev_obj, &hmsg_mgr);
1728 if (hmsg_mgr) {
1729 msg_delete(hmsg_mgr);
085467b8 1730 dev_set_msg_mgr(proc_obj->dev_obj, NULL);
7d55524d
ORL
1731 }
1732 }
1733 /* Place the Board in the Monitor State */
3c882de5 1734 if (!((*proc_obj->intf_fxns->brd_monitor)
085467b8 1735 (proc_obj->bridge_context))) {
7d55524d 1736 status = 0;
1737 }
1738
1739 return status;
1740}
1741
1742/*
1743 * ======== get_envp_count ========
1744 * Purpose:
1745 * Return the number of elements in the envp array, including the
1746 * terminating NULL element.
1747 */
1748static s32 get_envp_count(char **envp)
1749{
1750 s32 ret = 0;
1751 if (envp) {
1752 while (*envp++)
1753 ret++;
1754
1755 ret += 1; /* Include the terminating NULL in the count. */
1756 }
1757
1758 return ret;
1759}
1760
1761/*
1762 * ======== prepend_envp ========
1763 * Purpose:
1764 * Prepend an environment variable=value pair to the new envp array, and
1765 * copy in the existing var=value pairs in the old envp array.
1766 */
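/*
 * Layout sketch: for envp = { "A=1", "B=2", NULL }, get_envp_count() returns
 * 3 (the terminating NULL is counted), and with sz_var = "PROC_ID=0" the
 * resulting new_envp is { "PROC_ID=0", "A=1", "B=2", NULL }.
 */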
1767static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
0cd343a4 1768 s32 cnew_envp, char *sz_var)
1769{
1770 char **pp_envp = new_envp;
1771
7d55524d 1772 /* Prepend new environ var=value string */
0cd343a4 1773 *new_envp++ = sz_var;
1774
1775 /* Copy user's environment into our own. */
1776 while (envp_elems--)
1777 *new_envp++ = *envp++;
1778
1779 /* Ensure NULL terminates the new environment strings array. */
1780 if (envp_elems == 0)
1781 *new_envp = NULL;
1782
1783 return pp_envp;
1784}
1785
1786/*
1787 * ======== proc_notify_clients ========
1788 * Purpose:
1789 * Notify the processor's clients of the given events.
1790 */
0cd343a4 1791int proc_notify_clients(void *proc, u32 events)
1792{
1793 int status = 0;
e6890692 1794 struct proc_object *p_proc_object = (struct proc_object *)proc;
7d55524d 1795
1796 if (!p_proc_object) {
1797 status = -EFAULT;
1798 goto func_end;
1799 }
1800
0cd343a4 1801 ntfy_notify(p_proc_object->ntfy_obj, events);
1802func_end:
1803 return status;
1804}
1805
1806/*
1807 * ======== proc_notify_all_clients ========
1808 * Purpose:
1809 * Notify the processor of the events. This includes notifying all clients
1810 * attached to a particular DSP.
1811 */
0cd343a4 1812int proc_notify_all_clients(void *proc, u32 events)
1813{
1814 int status = 0;
e6890692 1815 struct proc_object *p_proc_object = (struct proc_object *)proc;
7d55524d 1816
1817 if (!p_proc_object) {
1818 status = -EFAULT;
1819 goto func_end;
1820 }
1821
085467b8 1822 dev_notify_clients(p_proc_object->dev_obj, events);
1823
1824func_end:
1825 return status;
1826}
1827
1828/*
1829 * ======== proc_get_processor_id ========
1830 * Purpose:
1831 * Retrieves the processor ID.
1832 */
13b18c29 1833int proc_get_processor_id(void *proc, u32 * proc_id)
1834{
1835 int status = 0;
e6890692 1836 struct proc_object *p_proc_object = (struct proc_object *)proc;
1837
1838 if (p_proc_object)
13b18c29 1839 *proc_id = p_proc_object->processor_id;
1840 else
1841 status = -EFAULT;
1842
1843 return status;
1844}