/* DEH Manager: only one created per board: */
struct deh_mgr {
- struct bridge_dev_context *hbridge_context; /* Bridge context. */
+ struct bridge_dev_context *bridge_context; /* Bridge context. */
struct ntfy_object *ntfy_obj; /* NTFY object */
/* MMU Fault DPC */
*/
struct msg_queue {
struct list_head list_elem;
- struct msg_mgr *hmsg_mgr;
+ struct msg_mgr *msg_mgr;
u32 max_msgs; /* Node message depth */
u32 msgq_id; /* Node environment pointer */
struct list_head msg_free_list; /* Free MsgFrames ready to be filled */
/* This Bridge driver's device context: */
struct bridge_dev_context {
- struct dev_object *hdev_obj; /* Handle to Bridge device object. */
+ struct dev_object *dev_obj; /* Handle to Bridge device object. */
u32 dsp_base_addr; /* Arm's API to DSP virt base addr */
/*
* DSP External memory prog address as seen virtually by the OS on
chnl_mgr_obj->open_channels = 0;
chnl_mgr_obj->output_mask = 0;
chnl_mgr_obj->last_output = 0;
- chnl_mgr_obj->hdev_obj = hdev_obj;
+ chnl_mgr_obj->dev_obj = hdev_obj;
spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
} else {
status = -ENOMEM;
kfree(chnl_mgr_obj->ap_channel);
- /* Set hchnl_mgr to NULL in device object. */
+ /* Set chnl_mgr to NULL in device object. */
- dev_set_chnl_mgr(chnl_mgr_obj->hdev_obj, NULL);
+ dev_set_chnl_mgr(chnl_mgr_obj->dev_obj, NULL);
/* Free this Chnl Mgr object: */
kfree(hchnl_mgr);
} else {
if (channel_info != NULL) {
if (pchnl) {
/* Return the requested information: */
- channel_info->hchnl_mgr = pchnl->chnl_mgr_obj;
+ channel_info->chnl_mgr = pchnl->chnl_mgr_obj;
channel_info->event_obj = pchnl->user_event;
channel_info->cnhl_id = pchnl->chnl_id;
channel_info->mode = pchnl->chnl_mode;
struct io_mgr {
/* These four fields must be the first fields in a io_mgr_ struct */
/* Bridge device context */
- struct bridge_dev_context *hbridge_context;
+ struct bridge_dev_context *bridge_context;
/* Function interface to Bridge driver */
struct bridge_drv_interface *intf_fxns;
- struct dev_object *hdev_obj; /* Device this board represents */
+ struct dev_object *dev_obj; /* Device this board represents */
/* These fields initialized in bridge_io_create() */
- struct chnl_mgr *hchnl_mgr;
+ struct chnl_mgr *chnl_mgr;
struct shm *shared_mem; /* Shared Memory control */
u8 *input; /* Address of input channel */
u8 *output; /* Address of output channel */
- struct msg_mgr *hmsg_mgr; /* Message manager */
+ struct msg_mgr *msg_mgr; /* Message manager */
/* Msg control for from DSP messages */
struct msg_ctrl *msg_input_ctrl;
/* Msg control for to DSP messages */
u16 intr_val; /* Interrupt value */
/* Private extnd proc info; mmu setup */
struct mgr_processorextinfo ext_proc_info;
- struct cmm_object *hcmm_mgr; /* Shared Mem Mngr */
+ struct cmm_object *cmm_mgr; /* Shared Mem Mngr */
struct work_struct io_workq; /* workqueue */
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
- u32 ul_trace_buffer_begin; /* Trace message start address */
- u32 ul_trace_buffer_end; /* Trace message end address */
- u32 ul_trace_buffer_current; /* Trace message current address */
+ u32 trace_buffer_begin; /* Trace message start address */
+ u32 trace_buffer_end; /* Trace message end address */
+ u32 trace_buffer_current; /* Trace message current address */
u32 gpp_read_pointer; /* GPP Read pointer to Trace buffer */
u8 *pmsg;
u32 gpp_va;
return -ENOMEM;
/* Initialize chnl_mgr object */
- pio_mgr->hchnl_mgr = hchnl_mgr;
+ pio_mgr->chnl_mgr = hchnl_mgr;
pio_mgr->word_size = mgr_attrts->word_size;
if (dev_type == DSP_UNIT) {
}
}
- pio_mgr->hbridge_context = hbridge_context;
+ pio_mgr->bridge_context = hbridge_context;
pio_mgr->shared_irq = mgr_attrts->irq_shared;
if (dsp_wdt_init()) {
bridge_io_destroy(pio_mgr);
HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
};
- status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
+ status = dev_get_bridge_context(hio_mgr->dev_obj, &pbridge_context);
if (!pbridge_context) {
status = -EFAULT;
goto func_end;
status = -EFAULT;
goto func_end;
}
- status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
+ status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
if (!cod_man) {
status = -EFAULT;
goto func_end;
}
- hchnl_mgr = hio_mgr->hchnl_mgr;
+ hchnl_mgr = hio_mgr->chnl_mgr;
/* The message manager is destroyed when the board is stopped. */
- dev_get_msg_mgr(hio_mgr->hdev_obj, &hio_mgr->hmsg_mgr);
- hmsg_mgr = hio_mgr->hmsg_mgr;
+ dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr);
+ hmsg_mgr = hio_mgr->msg_mgr;
if (!hchnl_mgr || !hmsg_mgr) {
status = -EFAULT;
goto func_end;
1)) == 0)) {
status =
hio_mgr->intf_fxns->
- brd_mem_map(hio_mgr->hbridge_context,
+ brd_mem_map(hio_mgr->bridge_context,
pa_curr, va_curr,
page_size[i], map_attrs,
NULL);
ae_proc[ndx].gpp_va = gpp_va_curr;
ae_proc[ndx].dsp_va =
va_curr / hio_mgr->word_size;
- ae_proc[ndx].ul_size = page_size[i];
+ ae_proc[ndx].size = page_size[i];
ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
} else {
status =
hio_mgr->intf_fxns->
- brd_mem_map(hio_mgr->hbridge_context,
+ brd_mem_map(hio_mgr->bridge_context,
pa_curr, va_curr,
page_size[i], map_attrs,
NULL);
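/*
 * A minimal sketch of the page-size selection both brd_mem_map() branches
 * above depend on: each chunk is mapped with the largest page size whose
 * alignment both address cursors satisfy. pick_page_size() is an
 * illustrative helper, not part of the driver; the table mirrors the one
 * ending in HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB shown earlier.
 */
static u32 pick_page_size(u32 pa_curr, u32 va_curr, u32 bytes_left)
{
	static const u32 page_size[] = {
		0x1000000, 0x100000, 0x10000, 0x1000	/* 16M, 1M, 64K, 4K */
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(page_size); i++) {
		/* both cursors aligned to this size, with enough data left? */
		if (((pa_curr | va_curr) & (page_size[i] - 1)) == 0 &&
		    bytes_left >= page_size[i])
			return page_size[i];
	}
	return 0;	/* nothing fits; caller must fail the mapping */
}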
gpp_phys;
ae_proc[ndx].gpp_va = 0;
/* 1 MB */
- ae_proc[ndx].ul_size = 0x100000;
+ ae_proc[ndx].size = 0x100000;
dev_dbg(bridge, "shm MMU entry PA %x "
"DSP_VA 0x%x\n", ae_proc[ndx].gpp_pa,
ae_proc[ndx].dsp_va);
ndx++;
} else {
status = hio_mgr->intf_fxns->brd_mem_map
- (hio_mgr->hbridge_context,
+ (hio_mgr->bridge_context,
hio_mgr->ext_proc_info.ty_tlb[i].
gpp_phys,
hio_mgr->ext_proc_info.ty_tlb[i].
i = 0;
while (l4_peripheral_table[i].phys_addr) {
status = hio_mgr->intf_fxns->brd_mem_map
- (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
+ (hio_mgr->bridge_context, l4_peripheral_table[i].phys_addr,
l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
map_attrs, NULL);
if (status)
ae_proc[i].dsp_va = 0;
ae_proc[i].gpp_pa = 0;
ae_proc[i].gpp_va = 0;
- ae_proc[i].ul_size = 0;
+ ae_proc[i].size = 0;
}
/*
* Set the shm physical address entry (grayed out in CDB file)
*/
status =
- hio_mgr->intf_fxns->dev_cntrl(hio_mgr->hbridge_context,
+ hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context,
BRDIOCTL_SETMMUCONFIG,
ae_proc);
if (status)
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
/* Get the start address of trace buffer */
status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
- &hio_mgr->ul_trace_buffer_begin);
+ &hio_mgr->trace_buffer_begin);
if (status) {
status = -EFAULT;
goto func_end;
}
- hio_mgr->gpp_read_pointer = hio_mgr->ul_trace_buffer_begin =
+ hio_mgr->gpp_read_pointer = hio_mgr->trace_buffer_begin =
(ul_gpp_va + ul_seg1_size + ul_pad_size) +
- (hio_mgr->ul_trace_buffer_begin - ul_dsp_va);
+ (hio_mgr->trace_buffer_begin - ul_dsp_va);
/* Get the end address of trace buffer */
status = cod_get_sym_value(cod_man, SYS_PUTCEND,
- &hio_mgr->ul_trace_buffer_end);
+ &hio_mgr->trace_buffer_end);
if (status) {
status = -EFAULT;
goto func_end;
}
- hio_mgr->ul_trace_buffer_end =
+ hio_mgr->trace_buffer_end =
(ul_gpp_va + ul_seg1_size + ul_pad_size) +
- (hio_mgr->ul_trace_buffer_end - ul_dsp_va);
+ (hio_mgr->trace_buffer_end - ul_dsp_va);
/* Get the current address of DSP write pointer */
status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
- &hio_mgr->ul_trace_buffer_current);
+ &hio_mgr->trace_buffer_current);
if (status) {
status = -EFAULT;
goto func_end;
}
- hio_mgr->ul_trace_buffer_current =
+ hio_mgr->trace_buffer_current =
(ul_gpp_va + ul_seg1_size + ul_pad_size) +
- (hio_mgr->ul_trace_buffer_current - ul_dsp_va);
+ (hio_mgr->trace_buffer_current - ul_dsp_va);
/* Calculate the size of trace buffer */
kfree(hio_mgr->pmsg);
- hio_mgr->pmsg = kmalloc(((hio_mgr->ul_trace_buffer_end -
- hio_mgr->ul_trace_buffer_begin) *
+ hio_mgr->pmsg = kmalloc(((hio_mgr->trace_buffer_end -
+ hio_mgr->trace_buffer_begin) *
hio_mgr->word_size) + 2, GFP_KERNEL);
if (!hio_mgr->pmsg)
status = -ENOMEM;
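/*
 * The rebasing arithmetic repeated above for SYS_PUTCBEG, SYS_PUTCEND and
 * BRIDGE_SYS_PUTC_CURRENT, condensed into one hypothetical helper: a
 * DSP-side address becomes GPP-visible by adding its offset from the DSP
 * base to the GPP base of the shared window. Parameter names reuse the
 * patch's variables; the function itself is illustrative only.
 */
static u32 dsp_sym_to_gpp(u32 dsp_addr, u32 ul_dsp_va, u32 ul_gpp_va,
			  u32 ul_seg1_size, u32 ul_pad_size)
{
	return (ul_gpp_va + ul_seg1_size + ul_pad_size) +
	       (dsp_addr - ul_dsp_va);
}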
/* Inform DSP that we have no more buffers on this channel */
set_chnl_free(sm, chnl);
- sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+ sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
func_end:
return;
}
if (parg[0] == MBX_PM_HIBERNATE_EN) {
dev_dbg(bridge, "PM: Hibernate command\n");
status = pio_mgr->intf_fxns->
- dev_cntrl(pio_mgr->hbridge_context,
+ dev_cntrl(pio_mgr->bridge_context,
BRDIOCTL_PWR_HIBERNATE, parg);
if (status)
pr_err("%s: hibernate cmd failed 0x%x\n",
parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
status = pio_mgr->intf_fxns->
- dev_cntrl(pio_mgr->hbridge_context,
+ dev_cntrl(pio_mgr->bridge_context,
BRDIOCTL_CONSTRAINT_REQUEST, parg);
if (status)
dev_dbg(bridge, "PM: Failed to set constraint "
dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
parg[0]);
status = pio_mgr->intf_fxns->
- dev_cntrl(pio_mgr->hbridge_context,
+ dev_cntrl(pio_mgr->bridge_context,
BRDIOCTL_CLK_CTRL, parg);
if (status)
dev_dbg(bridge, "PM: Failed to ctrl the DSP clk"
if (!pio_mgr)
goto func_end;
- chnl_mgr_obj = pio_mgr->hchnl_mgr;
- dev_get_msg_mgr(pio_mgr->hdev_obj, &msg_mgr_obj);
- dev_get_deh_mgr(pio_mgr->hdev_obj, &hdeh_mgr);
+ chnl_mgr_obj = pio_mgr->chnl_mgr;
+ dev_get_msg_mgr(pio_mgr->dev_obj, &msg_mgr_obj);
+ dev_get_deh_mgr(pio_mgr->dev_obj, &hdeh_mgr);
if (!chnl_mgr_obj)
goto func_end;
if (!pchnl || !mbx_val)
goto func_end;
- chnl_mgr_obj = io_manager->hchnl_mgr;
+ chnl_mgr_obj = io_manager->chnl_mgr;
sm = io_manager->shared_mem;
if (io_mode == IO_INPUT) {
/*
bool notify_client = false;
sm = pio_mgr->shared_mem;
- chnl_mgr_obj = pio_mgr->hchnl_mgr;
+ chnl_mgr_obj = pio_mgr->chnl_mgr;
/* Attempt to perform input */
if (!sm->input_full)
if (clear_chnl) {
/* Indicate to the DSP we have read the input */
sm->input_full = 0;
- sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+ sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
}
if (notify_client) {
/* Notify client with IO completion record */
/* Read the next message */
addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.cmd);
msg.msg.cmd =
- read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+ read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg1);
msg.msg.arg1 =
- read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+ read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg2);
msg.msg.arg2 =
- read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+ read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
msg.msgq_id =
- read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
+ read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
msg_input += sizeof(struct msg_dspmsg);
/* Determine which queue to put the message in */
/* Tell the DSP we've read the messages */
msg_ctr_obj->buf_empty = true;
msg_ctr_obj->post_swi = true;
- sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+ sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
}
}
struct chnl_irp *chnl_packet_obj;
u32 dw_dsp_f_mask;
- chnl_mgr_obj = pio_mgr->hchnl_mgr;
+ chnl_mgr_obj = pio_mgr->chnl_mgr;
sm = pio_mgr->shared_mem;
/* Attempt to perform output */
if (sm->output_full)
#endif
sm->output_full = 1;
/* Indicate to the DSP we have written the output */
- sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+ sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
/* Notify client with IO completion record (keep EOS) */
chnl_packet_obj->status &= CHNL_IOCSTATEOS;
notify_chnl_complete(pchnl, chnl_packet_obj);
val = (pmsg->msg_data).msgq_id;
addr = (u32) &msg_output->msgq_id;
- write_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr, val);
+ write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
val = (pmsg->msg_data).msg.cmd;
addr = (u32) &msg_output->msg.cmd;
- write_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr, val);
+ write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
val = (pmsg->msg_data).msg.arg1;
addr = (u32) &msg_output->msg.arg1;
- write_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr, val);
+ write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
val = (pmsg->msg_data).msg.arg2;
addr = (u32) &msg_output->msg.arg2;
- write_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr, val);
+ write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
msg_output++;
list_add_tail(&pmsg->list_elem, &hmsg_mgr->msg_free_list);
/* Set the post SWI flag */
msg_ctr_obj->post_swi = true;
/* Tell the DSP we have written the output. */
- sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
+ sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
}
}
}
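/*
 * Sketch of the handshake the message hunks above all end with: payload
 * words go into shared memory first, then the post_swi flag is raised,
 * then the mailbox interrupt makes the DSP look. The wrapper name is
 * illustrative; msg_ctrl and sm_interrupt_dsp() are as in the patch.
 */
static void notify_dsp_output(struct io_mgr *pio_mgr,
			      struct msg_ctrl *msg_ctr_obj)
{
	/* payload already written via write_ext32_bit_dsp_data() */
	msg_ctr_obj->post_swi = true;
	sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
}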
/* Register with CMM */
if (!status) {
- status = dev_get_cmm_mgr(hio_mgr->hdev_obj, &hio_mgr->hcmm_mgr);
+ status = dev_get_cmm_mgr(hio_mgr->dev_obj, &hio_mgr->cmm_mgr);
if (!status) {
- status = cmm_un_register_gppsm_seg(hio_mgr->hcmm_mgr,
+ status = cmm_un_register_gppsm_seg(hio_mgr->cmm_mgr,
CMM_ALLSEGMENTS);
}
}
ul_dsp_virt;
/* Register SM Segment 0. */
status =
- cmm_register_gppsm_seg(hio_mgr->hcmm_mgr, dw_gpp_base_pa,
+ cmm_register_gppsm_seg(hio_mgr->cmm_mgr, dw_gpp_base_pa,
ul_rsrvd_size, dw_offset,
(dw_gpp_base_pa >
ul_dsp_virt) ? CMM_ADDTODSPPA :
while (true) {
/* Get the DSP current pointer */
ul_gpp_cur_pointer =
- *(u32 *) (hio_mgr->ul_trace_buffer_current);
+ *(u32 *) (hio_mgr->trace_buffer_current);
ul_gpp_cur_pointer =
hio_mgr->gpp_va + (ul_gpp_cur_pointer -
hio_mgr->dsp_va);
/* Handle trace buffer wraparound */
memcpy(hio_mgr->pmsg,
(char *)hio_mgr->gpp_read_pointer,
- hio_mgr->ul_trace_buffer_end -
+ hio_mgr->trace_buffer_end -
hio_mgr->gpp_read_pointer);
ul_new_message_length =
- ul_gpp_cur_pointer - hio_mgr->ul_trace_buffer_begin;
- memcpy(&hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
+ ul_gpp_cur_pointer - hio_mgr->trace_buffer_begin;
+ memcpy(&hio_mgr->pmsg[hio_mgr->trace_buffer_end -
hio_mgr->gpp_read_pointer],
- (char *)hio_mgr->ul_trace_buffer_begin,
+ (char *)hio_mgr->trace_buffer_begin,
ul_new_message_length);
- hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
+ hio_mgr->pmsg[hio_mgr->trace_buffer_end -
hio_mgr->gpp_read_pointer +
ul_new_message_length] = '\0';
/*
* pointer.
*/
hio_mgr->gpp_read_pointer =
- hio_mgr->ul_trace_buffer_begin +
+ hio_mgr->trace_buffer_begin +
ul_new_message_length;
/* Print the trace messages */
pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
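#include <linux/string.h>

/*
 * The wraparound path above copies in two pieces: read pointer to end of
 * buffer, then buffer start up to the DSP's current pointer. The same
 * logic on a plain circular byte buffer, as a self-contained sketch
 * (names are illustrative, not the driver's):
 */
static size_t circ_copy(char *dst, const char *begin, const char *end,
			const char *rd, const char *wr)
{
	size_t n;

	if (wr >= rd) {			/* contiguous region */
		n = wr - rd;
		memcpy(dst, rd, n);
	} else {			/* wrapped: tail first, then head */
		size_t tail = end - rd;

		memcpy(dst, rd, tail);
		memcpy(dst + tail, begin, wr - begin);
		n = tail + (wr - begin);
	}
	dst[n] = '\0';			/* printable, as pr_info() expects */
	return n;
}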
struct bridge_dev_context *pbridge_context = hbridge_context;
struct bridge_drv_interface *intf_fxns;
struct dev_object *dev_obj = (struct dev_object *)
- pbridge_context->hdev_obj;
+ pbridge_context->dev_obj;
status = dev_get_cod_mgr(dev_obj, &cod_mgr);
"ILC", "RILC", "IER", "CSR"};
const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
struct bridge_drv_interface *intf_fxns;
- struct dev_object *dev_object = bridge_context->hdev_obj;
+ struct dev_object *dev_object = bridge_context->dev_obj;
status = dev_get_cod_mgr(dev_object, &code_mgr);
if (!code_mgr) {
struct cod_manager *code_mgr;
struct bridge_drv_interface *intf_fxns;
struct bridge_dev_context *bridge_ctxt = bridge_context;
- struct dev_object *dev_object = bridge_ctxt->hdev_obj;
+ struct dev_object *dev_object = bridge_ctxt->dev_obj;
struct modules_header modules_hdr;
struct dll_module *module_struct = NULL;
u32 module_dsp_addr;
return -ENOMEM;
msg_q->max_msgs = max_msgs;
- msg_q->hmsg_mgr = hmsg_mgr;
+ msg_q->msg_mgr = hmsg_mgr;
msg_q->arg = arg; /* Node handle */
msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */
/* Queues of Message frames for messages from the DSP */
struct msg_mgr *hmsg_mgr;
u32 io_msg_pend;
- if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr)
+ if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
return;
- hmsg_mgr = msg_queue_obj->hmsg_mgr;
+ hmsg_mgr = msg_queue_obj->msg_mgr;
msg_queue_obj->done = true;
/* Unblock all threads blocked in MSG_Get() or MSG_Put(). */
io_msg_pend = msg_queue_obj->io_msg_pend;
if (!msg_queue_obj || pmsg == NULL)
return -ENOMEM;
- hmsg_mgr = msg_queue_obj->hmsg_mgr;
+ hmsg_mgr = msg_queue_obj->msg_mgr;
spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
/* If a message is already there, get it */
u32 index;
int status;
- if (!msg_queue_obj || !pmsg || !msg_queue_obj->hmsg_mgr)
+ if (!msg_queue_obj || !pmsg || !msg_queue_obj->msg_mgr)
return -EFAULT;
- hmsg_mgr = msg_queue_obj->hmsg_mgr;
+ hmsg_mgr = msg_queue_obj->msg_mgr;
spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
struct msg_frame *pmsg, *tmp;
u32 i;
- if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr)
+ if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
return;
- hmsg_mgr = msg_queue_obj->hmsg_mgr;
+ hmsg_mgr = msg_queue_obj->msg_mgr;
/* Pull off num_to_dsp message frames from Msg manager and free */
i = 0;
* last dsp base image was loaded. The first entry is always
* SHMMEM base. */
/* Get SHM_BEG - convert to byte address */
- (void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME,
+ (void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME,
&ul_shm_base_virt);
ul_shm_base_virt *= DSPWORDSIZE;
DBC_ASSERT(ul_shm_base_virt != 0);
itmp_entry_ndx,
e->gpp_pa,
e->dsp_va,
- e->ul_size);
+ e->size);
hw_mmu_tlb_add(dev_context->dsp_mmu_base,
e->gpp_pa,
e->dsp_va,
- e->ul_size,
+ e->size,
itmp_entry_ndx,
&map_attrs, 1, 1);
hw_mmu_enable(resources->dmmu_base);
/* Enable the BIOS clock */
- (void)dev_get_symbol(dev_context->hdev_obj,
+ (void)dev_get_symbol(dev_context->dev_obj,
BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
- (void)dev_get_symbol(dev_context->hdev_obj,
+ (void)dev_get_symbol(dev_context->dev_obj,
BRIDGEINIT_LOADMON_GPTIMER,
&ul_load_monitor_timer);
}
if (!status) {
/* Set the DSP clock rate */
- (void)dev_get_symbol(dev_context->hdev_obj,
+ (void)dev_get_symbol(dev_context->dev_obj,
"_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
/*Set Autoidle Mode for IVA2 PLL */
(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
dsp_wdt_sm_set((void *)ul_shm_base);
dsp_wdt_enable(true);
- status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
+ status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
if (hio_mgr) {
io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
/* Write the synchronization bit to indicate the
dev_context->dsp_mmu_base = resources->dmmu_base;
}
if (!status) {
- dev_context->hdev_obj = hdev_obj;
+ dev_context->dev_obj = hdev_obj;
/* Store current board state. */
dev_context->brd_state = BRD_UNKNOWN;
dev_context->resources = resources;
dev_context->brd_state = BRD_DSP_HIBERNATION;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
status =
- dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
+ dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
if (!hio_mgr) {
status = DSP_EHANDLE;
return status;
pr_err("%s: Timed out waiting for DSP off mode, state %x\n",
__func__, pwr_state);
#ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR
- dev_get_deh_mgr(dev_context->hdev_obj, &hdeh_mgr);
+ dev_get_deh_mgr(dev_context->dev_obj, &hdeh_mgr);
bridge_deh_notify(hdeh_mgr, DSP_PWRERROR, 0);
#endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */
return -ETIMEDOUT;
u32 voltage_domain;
struct io_mgr *hio_mgr;
- status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
+ status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
if (!hio_mgr)
return -EFAULT;
bool trace_read = false;
if (!ul_shm_base_virt) {
- status = dev_get_symbol(dev_context->hdev_obj,
+ status = dev_get_symbol(dev_context->dev_obj,
SHMBASENAME, &ul_shm_base_virt);
}
DBC_ASSERT(ul_shm_base_virt != 0);
/* Check if it is a read of Trace section */
if (!status && !ul_trace_sec_beg) {
- status = dev_get_symbol(dev_context->hdev_obj,
+ status = dev_get_symbol(dev_context->dev_obj,
DSP_TRACESEC_BEG, &ul_trace_sec_beg);
}
DBC_ASSERT(ul_trace_sec_beg != 0);
if (!status && !ul_trace_sec_end) {
- status = dev_get_symbol(dev_context->hdev_obj,
+ status = dev_get_symbol(dev_context->dev_obj,
DSP_TRACESEC_END, &ul_trace_sec_end);
}
DBC_ASSERT(ul_trace_sec_end != 0);
/* Get DYNEXT_BEG, EXT_BEG and EXT_END. */
if (!status && !ul_dyn_ext_base) {
- status = dev_get_symbol(dev_context->hdev_obj,
+ status = dev_get_symbol(dev_context->dev_obj,
DYNEXTBASE, &ul_dyn_ext_base);
}
DBC_ASSERT(ul_dyn_ext_base != 0);
if (!status) {
- status = dev_get_symbol(dev_context->hdev_obj,
+ status = dev_get_symbol(dev_context->dev_obj,
EXTBASE, &ul_ext_base);
}
DBC_ASSERT(ul_ext_base != 0);
if (!status) {
- status = dev_get_symbol(dev_context->hdev_obj,
+ status = dev_get_symbol(dev_context->dev_obj,
EXTEND, &ul_ext_end);
}
DBC_ASSERT(ul_ext_end != 0);
if (symbols_reloaded) {
/* Check if it is a load to Trace section */
- ret = dev_get_symbol(dev_context->hdev_obj,
+ ret = dev_get_symbol(dev_context->dev_obj,
DSP_TRACESEC_BEG, &ul_trace_sec_beg);
if (!ret)
- ret = dev_get_symbol(dev_context->hdev_obj,
+ ret = dev_get_symbol(dev_context->dev_obj,
DSP_TRACESEC_END,
&ul_trace_sec_end);
}
if (!dw_base_addr) {
if (symbols_reloaded)
/* Get SHM_BEG EXT_BEG and EXT_END. */
- ret = dev_get_symbol(dev_context->hdev_obj,
+ ret = dev_get_symbol(dev_context->dev_obj,
SHMBASENAME, &ul_shm_base_virt);
DBC_ASSERT(ul_shm_base_virt != 0);
if (dynamic_load) {
if (symbols_reloaded)
ret =
dev_get_symbol
- (dev_context->hdev_obj, DYNEXTBASE,
+ (dev_context->dev_obj, DYNEXTBASE,
&ul_ext_base);
}
DBC_ASSERT(ul_ext_base != 0);
if (symbols_reloaded)
ret =
dev_get_symbol
- (dev_context->hdev_obj, EXTEND,
+ (dev_context->dev_obj, EXTEND,
&ul_ext_end);
}
} else {
if (!ret)
ret =
dev_get_symbol
- (dev_context->hdev_obj, EXTBASE,
+ (dev_context->dev_obj, EXTBASE,
&ul_ext_base);
DBC_ASSERT(ul_ext_base != 0);
if (!ret)
ret =
dev_get_symbol
- (dev_context->hdev_obj, EXTEND,
+ (dev_context->dev_obj, EXTEND,
&ul_ext_end);
}
}
if (symbols_reloaded) {
ret = dev_get_symbol
- (dev_context->hdev_obj,
+ (dev_context->dev_obj,
DSP_TRACESEC_END, &shm0_end);
if (!ret) {
ret =
dev_get_symbol
- (dev_context->hdev_obj, DYNEXTBASE,
+ (dev_context->dev_obj, DYNEXTBASE,
&ul_dyn_ext_base);
}
}
if (!deh)
return IRQ_HANDLED;
- resources = deh->hbridge_context->resources;
+ resources = deh->bridge_context->resources;
if (!resources) {
dev_dbg(bridge, "%s: Failed to get Host Resources\n",
__func__);
tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh);
/* Fill in context structure */
- deh->hbridge_context = hbridge_context;
+ deh->bridge_context = hbridge_context;
/* Install ISR function for DSP MMU fault */
status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
return;
dev_dbg(bridge, "%s: device exception", __func__);
- dev_context = deh->hbridge_context;
+ dev_context = deh->bridge_context;
switch (event) {
case DSP_SYSERROR:
struct bridge_drv_interface *intf_fxns;
struct io_mgr *hio_mgr; /* IO manager */
/* Device this board represents */
- struct dev_object *hdev_obj;
+ struct dev_object *dev_obj;
/* These fields initialized in bridge_chnl_create(): */
u32 output_mask; /* Host output channels w/ full buffers */
/* Channel info. */
struct chnl_info {
- struct chnl_mgr *hchnl_mgr; /* Owning channel manager. */
+ struct chnl_mgr *chnl_mgr; /* Owning channel manager. */
u32 cnhl_id; /* Channel ID. */
void *event_obj; /* Channel I/O completion event. */
/*Abstraction of I/O completion event. */
/* Attributes for CMM_AllocBuf() & CMM_AllocDesc() */
struct cmm_attrs {
- u32 ul_seg_id; /* 1,2... are SM segments. 0 is not. */
+ u32 seg_id; /* 1,2... are SM segments. 0 is not. */
u32 alignment; /* 0,1,2,4....min_block_size */
};
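/*
 * A hedged usage sketch for cmm_attrs: seg_id picks the shared-memory
 * segment an allocation comes from (seg_id 0 is rejected, as the
 * cmm_calloc_buf() hunks below show). Variable names and the size are
 * illustrative; cmm_mgr would come from dev_get_cmm_mgr(), and the call
 * returns NULL when no free block in the segment is large enough.
 */
struct cmm_attrs attrs = { .seg_id = 1, .alignment = 0 };
void *buf_va = NULL;
void *buf_pa = cmm_calloc_buf(cmm_mgr, 0x1000 /* bytes */, &attrs, &buf_va);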
struct cmm_seginfo {
u32 seg_base_pa; /* Start Phys address of SM segment */
/* Total size in bytes of segment: DSP+GPP */
- u32 ul_total_seg_size;
+ u32 total_seg_size;
u32 gpp_base_pa; /* Start Phys addr of Gpp SM seg */
u32 gpp_size; /* Size of Gpp SM seg in bytes */
u32 dsp_base_va; /* DSP virt base byte address */
/* # of SM segments registered with this Cmm. */
u32 num_gppsm_segs;
/* Total # of allocations outstanding for CMM */
- u32 ul_total_in_use_cnt;
+ u32 total_in_use_cnt;
/* Min SM block size allocation from cmm_create() */
u32 min_block_size;
/* Info per registered SM segment. */
/* XlatorCreate attributes */
struct cmm_xlatorattrs {
- u32 ul_seg_id; /* segment Id used for SM allocations */
+ u32 seg_id; /* segment Id used for SM allocations */
u32 dsp_bufs; /* # of DSP-side bufs */
u32 dsp_buf_size; /* size of DSP-side bufs in GPP bytes */
/* Vm base address alloc'd in client process context */
/* Memory Segment Status Values */
struct dsp_memstat {
- u32 ul_size;
- u32 ul_total_free_size;
+ u32 size;
+ u32 total_free_size;
u32 len_max_free_block;
u32 num_free_blocks;
u32 num_alloc_blocks;
u32 cb_struct;
enum dsp_resourceinfotype resource_type;
union {
- u32 ul_resource;
+ u32 resource;
struct dsp_memstat mem_stat;
struct dsp_procloadstat proc_load_stat;
} result;
* DEV Initialized
* Valid hdev_obj
* Ensures:
- * 0 and hdev_obj->hnode_mgr != NULL
- * else hdev_obj->hnode_mgr == NULL
+ * 0 and hdev_obj->node_mgr != NULL
+ * else hdev_obj->node_mgr == NULL
*/
extern int dev_create2(struct dev_object *hdev_obj);
* DEV Initialized
* Valid hdev_obj
* Ensures:
- * 0 and hdev_obj->hnode_mgr == NULL
+ * 0 and hdev_obj->node_mgr == NULL
* else -EPERM.
*/
extern int dev_destroy2(struct dev_object *hdev_obj);
struct {
void *hprocessor;
- u32 ul_size;
+ u32 size;
void *__user *pp_rsv_addr;
} args_proc_rsvmem;
struct {
void *hprocessor;
- u32 ul_size;
+ u32 size;
void *prsv_addr;
} args_proc_unrsvmem;
struct {
void *hprocessor;
void *pmpu_addr;
- u32 ul_size;
+ u32 size;
void *req_addr;
void *__user *pp_map_addr;
u32 ul_map_attr;
struct {
void *hprocessor;
- u32 ul_size;
+ u32 size;
void *map_addr;
} args_proc_unmapmem;
struct {
void *hprocessor;
void *pmpu_addr;
- u32 ul_size;
+ u32 size;
u32 dir;
} args_proc_dma;
struct {
void *hprocessor;
void *pmpu_addr;
- u32 ul_size;
+ u32 size;
u32 ul_flags;
} args_proc_flushmemory;
struct {
void *hprocessor;
void *pmpu_addr;
- u32 ul_size;
+ u32 size;
} args_proc_invalidatememory;
/* NODE Module */
/* CMM Module */
struct {
- struct cmm_object *hcmm_mgr;
+ struct cmm_object *cmm_mgr;
u32 usize;
struct cmm_attrs *pattrs;
void **pp_buf_va;
} args_cmm_allocbuf;
struct {
- struct cmm_object *hcmm_mgr;
+ struct cmm_object *cmm_mgr;
void *buf_pa;
u32 ul_seg_id;
} args_cmm_freebuf;
} args_cmm_gethandle;
struct {
- struct cmm_object *hcmm_mgr;
+ struct cmm_object *cmm_mgr;
struct cmm_info __user *cmm_info_obj;
} args_cmm_getinfo;
u32 gpp_pa; /* GPP physical address */
/* GPP virtual address. __va does not work for ioremapped addresses */
u32 gpp_va;
- u32 ul_size; /* Size of the mapped memory in bytes */
+ u32 size; /* Size of the mapped memory in bytes */
enum hw_endianism_t endianism;
enum hw_mmu_mixed_size_t mixed_mode;
enum hw_element_size_t elem_size;
struct nldr_attrs {
nldr_ovlyfxn ovly;
nldr_writefxn write;
- u16 us_dsp_word_size;
- u16 us_dsp_mau_size;
+ u16 dsp_word_size;
+ u16 dsp_mau_size;
};
/*
char *pstr_event_name;
void *virt_base; /* Process virtual base address of
* mapped SM */
- u32 ul_virt_size; /* Size of virtual space in bytes */
+ u32 virt_size; /* Size of virtual space in bytes */
struct dsp_streamattrin *stream_attr_in;
};
#include <dspbridge/cmm.h>
/* ----------------------------------- Defines, Data Structures, Typedefs */
-#define NEXT_PA(pnode) (pnode->pa + pnode->ul_size)
+#define NEXT_PA(pnode) (pnode->pa + pnode->size)
/* Other bus/platform translations */
#define DSPPA2GPPPA(base, x, y) ((x)+(y))
*/
struct cmm_allocator { /* sma */
unsigned int shm_base; /* Start of physical SM block */
- u32 ul_sm_size; /* Size of SM block in bytes */
+ u32 sm_size; /* Size of SM block in bytes */
unsigned int vm_base; /* Start of VM block. (Dev driver
* context for 'sma') */
u32 dsp_phys_addr_offset; /* DSP PA to GPP PA offset for this
s8 c_factor; /* DSPPa to GPPPa Conversion Factor */
unsigned int dsp_base; /* DSP virt base byte address */
u32 dsp_size; /* DSP seg size in bytes */
- struct cmm_object *hcmm_mgr; /* back ref to parent mgr */
+ struct cmm_object *cmm_mgr; /* back ref to parent mgr */
/* node list of available memory */
struct list_head free_list;
/* node list of memory in use */
struct cmm_xlator { /* Pa<->Va translator object */
/* CMM object this translator associated */
- struct cmm_object *hcmm_mgr;
+ struct cmm_object *cmm_mgr;
/*
* Client process virtual base address that corresponds to phys SM
- * base address for translator's ul_seg_id.
+ * base address for translator's seg_id.
* Only 1 segment ID currently supported.
*/
unsigned int virt_base; /* virtual base address */
- u32 ul_virt_size; /* size of virt space in bytes */
- u32 ul_seg_id; /* Segment Id */
+ u32 virt_size; /* size of virt space in bytes */
+ u32 seg_id; /* Segment Id */
};
/* CMM Mgr */
/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
- 1 /* ul_seg_id, default segment Id for allocator */
+ 1 /* seg_id, default segment Id for allocator */
};
/* Address translator default attrs */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
- /* ul_seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
+ /* seg_id, does not have to match cmm_dfltalctattrs seg_id */
1,
0, /* dsp_bufs */
0, /* dsp_buf_size */
struct list_head link; /* must be 1st element */
u32 pa; /* Phys addr */
u32 va; /* Virtual address in device process context */
- u32 ul_size; /* SM block size in bytes */
+ u32 size; /* SM block size in bytes */
u32 client_proc; /* Process that allocated this mem block */
};
*pp_buf_va = NULL;
if (cmm_mgr_obj && (usize != 0)) {
- if (pattrs->ul_seg_id > 0) {
+ if (pattrs->seg_id > 0) {
/* SegId > 0 is SM */
/* get the allocator object for this segment id */
allocator =
- get_allocator(cmm_mgr_obj, pattrs->ul_seg_id);
+ get_allocator(cmm_mgr_obj, pattrs->seg_id);
/* keep block size a multiple of min_block_size */
usize =
((usize - 1) & ~(cmm_mgr_obj->min_block_size -
pnode = get_free_block(allocator, usize);
}
if (pnode) {
- delta_size = (pnode->ul_size - usize);
+ delta_size = (pnode->size - usize);
if (delta_size >= cmm_mgr_obj->min_block_size) {
/* create a new block with the leftovers and
* add to freelist */
/* leftovers go free */
add_to_free_list(allocator, new_node);
/* adjust our node's size */
- pnode->ul_size = usize;
+ pnode->size = usize;
}
/* Tag node with client process requesting allocation
* We'll need to free up a process's alloc'd SM if the
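/*
 * The hunk above truncates the creation of new_node; a hedged
 * reconstruction of the split, with make_node() standing in for the real
 * constructor (a later hunk shows such a constructor filling pa, va and
 * size) and the node type name assumed:
 */
static void split_block(struct cmm_object *cmm_mgr_obj,
			struct cmm_allocator *allocator,
			struct cmm_mnode *pnode, u32 usize)
{
	u32 delta_size = pnode->size - usize;

	if (delta_size >= cmm_mgr_obj->min_block_size) {
		/* the leftover tail becomes its own free block */
		struct cmm_mnode *new_node =
		    make_node(cmm_mgr_obj, pnode->pa + usize,
			      pnode->va + usize, delta_size);

		add_to_free_list(allocator, new_node);
		pnode->size = usize;	/* now covers exactly the request */
	}
}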
/* Check for outstanding memory allocations */
status = cmm_get_info(hcmm_mgr, &temp_info);
if (!status) {
- if (temp_info.ul_total_in_use_cnt > 0) {
+ if (temp_info.total_in_use_cnt > 0) {
/* outstanding allocations */
status = -EPERM;
}
if (ul_seg_id == 0) {
pattrs = &cmm_dfltalctattrs;
- ul_seg_id = pattrs->ul_seg_id;
+ ul_seg_id = pattrs->seg_id;
}
if (!hcmm_mgr || !(ul_seg_id > 0)) {
status = -EFAULT;
mutex_lock(&cmm_mgr_obj->cmm_lock);
cmm_info_obj->num_gppsm_segs = 0; /* # of SM segments */
/* Total # of outstanding alloc */
- cmm_info_obj->ul_total_in_use_cnt = 0;
+ cmm_info_obj->total_in_use_cnt = 0;
/* min block size */
cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size;
/* check SM memory segments */
cmm_info_obj->num_gppsm_segs++;
cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
altr->shm_base - altr->dsp_size;
- cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
- altr->dsp_size + altr->ul_sm_size;
+ cmm_info_obj->seg_info[ul_seg - 1].total_seg_size =
+ altr->dsp_size + altr->sm_size;
cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa =
altr->shm_base;
cmm_info_obj->seg_info[ul_seg - 1].gpp_size =
- altr->ul_sm_size;
+ altr->sm_size;
cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va =
altr->dsp_base;
cmm_info_obj->seg_info[ul_seg - 1].dsp_size =
cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0;
list_for_each_entry(curr, &altr->in_use_list, link) {
- cmm_info_obj->ul_total_in_use_cnt++;
+ cmm_info_obj->total_in_use_cnt++;
cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++;
}
}
goto func_end;
}
- psma->hcmm_mgr = hcmm_mgr; /* ref to parent */
+ psma->cmm_mgr = hcmm_mgr; /* ref to parent */
psma->shm_base = dw_gpp_base_pa; /* SM Base phys */
- psma->ul_sm_size = ul_size; /* SM segment size in bytes */
+ psma->sm_size = ul_size; /* SM segment size in bytes */
psma->vm_base = gpp_base_va;
psma->dsp_phys_addr_offset = dsp_addr_offset;
psma->c_factor = c_factor;
pnode->pa = dw_pa;
pnode->va = dw_va;
- pnode->ul_size = ul_size;
+ pnode->size = ul_size;
return pnode;
}
return NULL;
list_for_each_entry_safe(node, tmp, &allocator->free_list, link) {
- if (usize <= node->ul_size) {
+ if (usize <= node->size) {
list_del(&node->link);
return node;
}
list_for_each_entry(curr, &allocator->free_list, link) {
if (NEXT_PA(curr) == node->pa) {
- curr->ul_size += node->ul_size;
- delete_node(allocator->hcmm_mgr, node);
+ curr->size += node->size;
+ delete_node(allocator->cmm_mgr, node);
return;
}
if (curr->pa == NEXT_PA(node)) {
curr->pa = node->pa;
curr->va = node->va;
- curr->ul_size += node->ul_size;
- delete_node(allocator->hcmm_mgr, node);
+ curr->size += node->size;
+ delete_node(allocator->cmm_mgr, node);
return;
}
}
list_for_each_entry(curr, &allocator->free_list, link) {
- if (curr->ul_size >= node->ul_size) {
+ if (curr->size >= node->size) {
list_add_tail(&node->link, &curr->link);
return;
}
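/*
 * Condensed sketch of the two free-list invariants kept above: physically
 * adjacent blocks are merged (via NEXT_PA()), and remaining blocks stay in
 * ascending-size order so get_free_block()'s first fit is a good fit.
 * The node type name is assumed and delete_node() bookkeeping is elided.
 */
static void free_list_insert(struct cmm_allocator *allocator,
			     struct cmm_mnode *node)
{
	struct cmm_mnode *curr;

	list_for_each_entry(curr, &allocator->free_list, link) {
		if (curr->pa + curr->size == node->pa) {	/* curr | node */
			curr->size += node->size;
			return;	/* node's descriptor is now redundant */
		}
		if (node->pa + node->size == curr->pa) {	/* node | curr */
			curr->pa = node->pa;
			curr->va = node->va;
			curr->size += node->size;
			return;
		}
	}
	list_for_each_entry(curr, &allocator->free_list, link) {
		if (curr->size >= node->size) {
			list_add_tail(&node->link, &curr->link); /* before curr */
			return;
		}
	}
	list_add_tail(&node->link, &allocator->free_list);	/* largest yet */
}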
xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
if (xlator_object != NULL) {
- xlator_object->hcmm_mgr = hcmm_mgr; /* ref back to CMM */
+ xlator_object->cmm_mgr = hcmm_mgr; /* ref back to CMM */
/* SM seg_id */
- xlator_object->ul_seg_id = xlator_attrs->ul_seg_id;
+ xlator_object->seg_id = xlator_attrs->seg_id;
} else {
status = -ENOMEM;
}
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(xlator != NULL);
- DBC_REQUIRE(xlator_obj->hcmm_mgr != NULL);
+ DBC_REQUIRE(xlator_obj->cmm_mgr != NULL);
DBC_REQUIRE(va_buf != NULL);
DBC_REQUIRE(pa_size > 0);
- DBC_REQUIRE(xlator_obj->ul_seg_id > 0);
+ DBC_REQUIRE(xlator_obj->seg_id > 0);
if (xlator_obj) {
- attrs.ul_seg_id = xlator_obj->ul_seg_id;
+ attrs.seg_id = xlator_obj->seg_id;
__raw_writel(0, va_buf);
/* Alloc SM */
pbuf =
- cmm_calloc_buf(xlator_obj->hcmm_mgr, pa_size, &attrs, NULL);
+ cmm_calloc_buf(xlator_obj->cmm_mgr, pa_size, &attrs, NULL);
if (pbuf) {
/* convert to translator(node/strm) process Virtual
* address */
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(buf_va != NULL);
- DBC_REQUIRE(xlator_obj->ul_seg_id > 0);
+ DBC_REQUIRE(xlator_obj->seg_id > 0);
if (xlator_obj) {
/* convert Va to Pa so we can free it. */
buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
if (buf_pa) {
- status = cmm_free_buf(xlator_obj->hcmm_mgr, buf_pa,
- xlator_obj->ul_seg_id);
+ status = cmm_free_buf(xlator_obj->cmm_mgr, buf_pa,
+ xlator_obj->seg_id);
if (status) {
/* Uh oh, this shouldn't happen. Descriptor
* gone! */
if (set_info) {
/* set translators virtual address range */
xlator_obj->virt_base = (u32) *paddr;
- xlator_obj->ul_virt_size = ul_size;
+ xlator_obj->virt_size = ul_size;
} else { /* return virt base address */
*paddr = (u8 *) xlator_obj->virt_base;
}
if (!xlator_obj)
goto loop_cont;
- cmm_mgr_obj = (struct cmm_object *)xlator_obj->hcmm_mgr;
+ cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
/* get this translator's default SM allocator */
- DBC_ASSERT(xlator_obj->ul_seg_id > 0);
- allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->ul_seg_id - 1];
+ DBC_ASSERT(xlator_obj->seg_id > 0);
+ allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
if (!allocator)
goto loop_cont;
if ((dw_addr_xlate < xlator_obj->virt_base) ||
(dw_addr_xlate >=
(xlator_obj->virt_base +
- xlator_obj->ul_virt_size))) {
+ xlator_obj->virt_size))) {
dw_addr_xlate = 0; /* bad address */
}
} else {
u8 dev_type; /* Device Type */
struct cfg_devnode *dev_node_obj; /* Platform specific dev id */
/* Bridge Context Handle */
- struct bridge_dev_context *hbridge_context;
+ struct bridge_dev_context *bridge_context;
/* Function interface to Bridge driver. */
struct bridge_drv_interface bridge_interface;
struct brd_object *lock_owner; /* Client with exclusive access. */
struct cod_manager *cod_mgr; /* Code manager handle. */
- struct chnl_mgr *hchnl_mgr; /* Channel manager. */
- struct deh_mgr *hdeh_mgr; /* DEH manager. */
- struct msg_mgr *hmsg_mgr; /* Message manager. */
+ struct chnl_mgr *chnl_mgr; /* Channel manager. */
+ struct deh_mgr *deh_mgr; /* DEH manager. */
+ struct msg_mgr *msg_mgr; /* Message manager. */
struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */
- struct cmm_object *hcmm_mgr; /* SM memory manager. */
+ struct cmm_object *cmm_mgr; /* SM memory manager. */
struct dmm_object *dmm_mgr; /* Dynamic memory manager. */
u32 word_size; /* DSP word size: quick access. */
- struct drv_object *hdrv_obj; /* Driver Object */
+ struct drv_object *drv_obj; /* Driver Object */
/* List of Processors attached to this device */
struct list_head proc_list;
- struct node_mgr *hnode_mgr;
+ struct node_mgr *node_mgr;
};
struct drv_ext {
DBC_REQUIRE(host_buf != NULL); /* Required of BrdWrite(). */
if (dev_obj) {
/* Require of BrdWrite() */
- DBC_ASSERT(dev_obj->hbridge_context != NULL);
+ DBC_ASSERT(dev_obj->bridge_context != NULL);
status = (*dev_obj->bridge_interface.brd_write) (
- dev_obj->hbridge_context, host_buf,
+ dev_obj->bridge_context, host_buf,
dsp_add, ul_num_bytes, mem_space);
/* Special case of getting the address only */
if (ul_num_bytes == 0)
/* Fill out the rest of the Dev Object structure: */
dev_obj->dev_node_obj = dev_node_obj;
dev_obj->cod_mgr = NULL;
- dev_obj->hchnl_mgr = NULL;
- dev_obj->hdeh_mgr = NULL;
+ dev_obj->chnl_mgr = NULL;
+ dev_obj->deh_mgr = NULL;
dev_obj->lock_owner = NULL;
dev_obj->word_size = DSPWORDSIZE;
- dev_obj->hdrv_obj = hdrv_obj;
+ dev_obj->drv_obj = hdrv_obj;
dev_obj->dev_type = DSP_UNIT;
/* Store this Bridge's interface functions, based on its
* version. */
/* Call fxn_dev_create() to get the Bridge's device
* context handle. */
status = (dev_obj->bridge_interface.dev_create)
- (&dev_obj->hbridge_context, dev_obj,
+ (&dev_obj->bridge_context, dev_obj,
host_res);
/* Assert bridge_dev_create()'s ensure clause: */
DBC_ASSERT(status
- || (dev_obj->hbridge_context != NULL));
+ || (dev_obj->bridge_context != NULL));
} else {
status = -ENOMEM;
}
pr_err("%s: No memory reserved for shared structures\n",
__func__);
}
- status = chnl_create(&dev_obj->hchnl_mgr, dev_obj, &mgr_attrs);
+ status = chnl_create(&dev_obj->chnl_mgr, dev_obj, &mgr_attrs);
if (status == -ENOSYS) {
/* It's OK for a device not to have a channel
* manager: */
status = 0;
}
/* Create CMM mgr even if Msg Mgr not impl. */
- status = cmm_create(&dev_obj->hcmm_mgr,
+ status = cmm_create(&dev_obj->cmm_mgr,
(struct dev_object *)dev_obj, NULL);
/* Only create IO manager if we have a channel manager */
- if (!status && dev_obj->hchnl_mgr) {
+ if (!status && dev_obj->chnl_mgr) {
status = io_create(&dev_obj->hio_mgr, dev_obj,
&io_mgr_attrs);
}
/* Only create DEH manager if we have an IO manager */
if (!status) {
/* Instantiate the DEH module */
- status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj);
+ status = bridge_deh_create(&dev_obj->deh_mgr, dev_obj);
}
/* Create DMM mgr . */
status = dmm_create(&dev_obj->dmm_mgr,
DBC_REQUIRE(hdev_obj);
/* There can be only one Node Manager per DEV object */
- DBC_ASSERT(!dev_obj->hnode_mgr);
- status = node_create_mgr(&dev_obj->hnode_mgr, hdev_obj);
+ DBC_ASSERT(!dev_obj->node_mgr);
+ status = node_create_mgr(&dev_obj->node_mgr, hdev_obj);
if (status)
- dev_obj->hnode_mgr = NULL;
+ dev_obj->node_mgr = NULL;
- DBC_ENSURE((!status && dev_obj->hnode_mgr != NULL)
- || (status && dev_obj->hnode_mgr == NULL));
+ DBC_ENSURE((!status && dev_obj->node_mgr != NULL)
+ || (status && dev_obj->node_mgr == NULL));
return status;
}
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(hdev_obj);
- if (dev_obj->hnode_mgr) {
- if (node_delete_mgr(dev_obj->hnode_mgr))
+ if (dev_obj->node_mgr) {
+ if (node_delete_mgr(dev_obj->node_mgr))
status = -EPERM;
else
- dev_obj->hnode_mgr = NULL;
+ dev_obj->node_mgr = NULL;
}
- DBC_ENSURE((!status && dev_obj->hnode_mgr == NULL) || status);
+ DBC_ENSURE((!status && dev_obj->node_mgr == NULL) || status);
return status;
}
dev_obj->cod_mgr = NULL;
}
- if (dev_obj->hnode_mgr) {
- node_delete_mgr(dev_obj->hnode_mgr);
- dev_obj->hnode_mgr = NULL;
+ if (dev_obj->node_mgr) {
+ node_delete_mgr(dev_obj->node_mgr);
+ dev_obj->node_mgr = NULL;
}
/* Free the io, channel, and message managers for this board: */
io_destroy(dev_obj->hio_mgr);
dev_obj->hio_mgr = NULL;
}
- if (dev_obj->hchnl_mgr) {
- chnl_destroy(dev_obj->hchnl_mgr);
- dev_obj->hchnl_mgr = NULL;
+ if (dev_obj->chnl_mgr) {
+ chnl_destroy(dev_obj->chnl_mgr);
+ dev_obj->chnl_mgr = NULL;
}
- if (dev_obj->hmsg_mgr) {
- msg_delete(dev_obj->hmsg_mgr);
- dev_obj->hmsg_mgr = NULL;
+ if (dev_obj->msg_mgr) {
+ msg_delete(dev_obj->msg_mgr);
+ dev_obj->msg_mgr = NULL;
}
- if (dev_obj->hdeh_mgr) {
+ if (dev_obj->deh_mgr) {
/* Uninitialize DEH module. */
- bridge_deh_destroy(dev_obj->hdeh_mgr);
- dev_obj->hdeh_mgr = NULL;
+ bridge_deh_destroy(dev_obj->deh_mgr);
+ dev_obj->deh_mgr = NULL;
}
- if (dev_obj->hcmm_mgr) {
- cmm_destroy(dev_obj->hcmm_mgr, true);
- dev_obj->hcmm_mgr = NULL;
+ if (dev_obj->cmm_mgr) {
+ cmm_destroy(dev_obj->cmm_mgr, true);
+ dev_obj->cmm_mgr = NULL;
}
if (dev_obj->dmm_mgr) {
/* Call the driver's bridge_dev_destroy() function: */
/* Require of DevDestroy */
- if (dev_obj->hbridge_context) {
+ if (dev_obj->bridge_context) {
status = (*dev_obj->bridge_interface.dev_destroy)
- (dev_obj->hbridge_context);
- dev_obj->hbridge_context = NULL;
+ (dev_obj->bridge_context);
+ dev_obj->bridge_context = NULL;
} else
status = -EPERM;
if (!status) {
/* Remove this DEV_Object from the global list: */
- drv_remove_dev_object(dev_obj->hdrv_obj, dev_obj);
+ drv_remove_dev_object(dev_obj->drv_obj, dev_obj);
/* Free The library * LDR_FreeModule
* (dev_obj->module_obj); */
/* Free this dev object: */
DBC_REQUIRE(mgr != NULL);
if (hdev_obj) {
- *mgr = dev_obj->hchnl_mgr;
+ *mgr = dev_obj->chnl_mgr;
} else {
*mgr = NULL;
status = -EFAULT;
DBC_REQUIRE(mgr != NULL);
if (hdev_obj) {
- *mgr = dev_obj->hcmm_mgr;
+ *mgr = dev_obj->cmm_mgr;
} else {
*mgr = NULL;
status = -EFAULT;
DBC_REQUIRE(deh_manager != NULL);
DBC_REQUIRE(hdev_obj);
if (hdev_obj) {
- *deh_manager = hdev_obj->hdeh_mgr;
+ *deh_manager = hdev_obj->deh_mgr;
} else {
*deh_manager = NULL;
status = -EFAULT;
DBC_REQUIRE(msg_man != NULL);
DBC_REQUIRE(hdev_obj);
- *msg_man = hdev_obj->hmsg_mgr;
+ *msg_man = hdev_obj->msg_mgr;
}
/*
DBC_REQUIRE(node_man != NULL);
if (hdev_obj) {
- *node_man = dev_obj->hnode_mgr;
+ *node_man = dev_obj->node_mgr;
} else {
*node_man = NULL;
status = -EFAULT;
DBC_REQUIRE(phbridge_context != NULL);
if (hdev_obj) {
- *phbridge_context = dev_obj->hbridge_context;
+ *phbridge_context = dev_obj->bridge_context;
} else {
*phbridge_context = NULL;
status = -EFAULT;
DBC_REQUIRE(refs > 0);
if (hdev_obj)
- dev_obj->hchnl_mgr = hmgr;
+ dev_obj->chnl_mgr = hmgr;
else
status = -EFAULT;
- DBC_ENSURE(status || (dev_obj->hchnl_mgr == hmgr));
+ DBC_ENSURE(status || (dev_obj->chnl_mgr == hmgr));
return status;
}
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(hdev_obj);
- hdev_obj->hmsg_mgr = hmgr;
+ hdev_obj->msg_mgr = hmgr;
}
/*
status = proc_end_dma(pr_ctxt,
args->args_proc_dma.pmpu_addr,
- args->args_proc_dma.ul_size,
+ args->args_proc_dma.size,
args->args_proc_dma.dir);
return status;
}
status = proc_begin_dma(pr_ctxt,
args->args_proc_dma.pmpu_addr,
- args->args_proc_dma.ul_size,
+ args->args_proc_dma.size,
args->args_proc_dma.dir);
return status;
}
status = proc_flush_memory(pr_ctxt,
args->args_proc_flushmemory.pmpu_addr,
- args->args_proc_flushmemory.ul_size,
+ args->args_proc_flushmemory.size,
args->args_proc_flushmemory.ul_flags);
return status;
}
status =
proc_invalidate_memory(pr_ctxt,
args->args_proc_invalidatememory.pmpu_addr,
- args->args_proc_invalidatememory.ul_size);
+ args->args_proc_invalidatememory.size);
return status;
}
void *map_addr;
void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
- if (!args->args_proc_mapmem.ul_size)
+ if (!args->args_proc_mapmem.size)
return -EINVAL;
status = proc_map(args->args_proc_mapmem.hprocessor,
args->args_proc_mapmem.pmpu_addr,
- args->args_proc_mapmem.ul_size,
+ args->args_proc_mapmem.size,
args->args_proc_mapmem.req_addr, &map_addr,
args->args_proc_mapmem.ul_map_attr, pr_ctxt);
if (!status) {
void *prsv_addr;
void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
- if ((args->args_proc_rsvmem.ul_size <= 0) ||
- (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0)
+ if ((args->args_proc_rsvmem.size <= 0) ||
+ (args->args_proc_rsvmem.size & (PG_SIZE4K - 1)) != 0)
return -EINVAL;
status = proc_reserve_memory(hprocessor,
- args->args_proc_rsvmem.ul_size, &prsv_addr,
+ args->args_proc_rsvmem.size, &prsv_addr,
pr_ctxt);
if (!status) {
if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) {
int status = 0;
struct cmm_info cmm_info_obj;
- status = cmm_get_info(args->args_cmm_getinfo.hcmm_mgr, &cmm_info_obj);
+ status = cmm_get_info(args->args_cmm_getinfo.cmm_mgr, &cmm_info_obj);
CP_TO_USR(args->args_cmm_getinfo.cmm_info_obj, &cmm_info_obj, status,
1);
if (!status) {
pio_mgr = (struct io_mgr_ *)hio_mgr;
pio_mgr->intf_fxns = intf_fxns;
- pio_mgr->hdev_obj = hdev_obj;
+ pio_mgr->dev_obj = hdev_obj;
/* Return the new channel manager handle: */
*io_man = hio_mgr;
*/
struct io_mgr_ {
/* These must be the first fields in a io_mgr struct: */
- struct bridge_dev_context *hbridge_context; /* Bridge context. */
+ struct bridge_dev_context *bridge_context; /* Bridge context. */
/* Function interface to Bridge driver. */
struct bridge_drv_interface *intf_fxns;
- struct dev_object *hdev_obj; /* Device this board represents. */
+ struct dev_object *dev_obj; /* Device this board represents. */
};
#endif /* IOOBJ_ */
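/*
 * io_mgr_ carries only the fields io_mgr is required to start with, so
 * generic code can cast one to the other (io_create above does exactly
 * that with (struct io_mgr_ *)hio_mgr). A minimal illustration of the
 * shared-prefix idiom, with hypothetical types:
 */
struct obj_hdr {
	struct bridge_dev_context *bridge_context;
	struct bridge_drv_interface *intf_fxns;
	struct dev_object *dev_obj;
};

struct obj_full {
	/* leading fields must match obj_hdr exactly */
	struct bridge_dev_context *bridge_context;
	struct bridge_drv_interface *intf_fxns;
	struct dev_object *dev_obj;
	int private_state;	/* anything past the shared prefix */
};

static void set_dev(struct obj_full *full, struct dev_object *dev)
{
	struct obj_hdr *hdr = (struct obj_hdr *)full;	/* prefix cast */

	hdr->dev_obj = dev;
}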
* ======== disp_object ========
*/
struct disp_object {
- struct dev_object *hdev_obj; /* Device for this processor */
+ struct dev_object *dev_obj; /* Device for this processor */
/* Function interface to Bridge driver */
struct bridge_drv_interface *intf_fxns;
- struct chnl_mgr *hchnl_mgr; /* Channel manager */
+ struct chnl_mgr *chnl_mgr; /* Channel manager */
struct chnl_object *chnl_to_dsp; /* Chnl for commands to RMS */
struct chnl_object *chnl_from_dsp; /* Chnl for replies from RMS */
u8 *pbuf; /* Buffer for commands, replies */
if (disp_obj == NULL)
status = -ENOMEM;
else
- disp_obj->hdev_obj = hdev_obj;
+ disp_obj->dev_obj = hdev_obj;
/* Get Channel manager and Bridge function interface */
if (!status) {
- status = dev_get_chnl_mgr(hdev_obj, &(disp_obj->hchnl_mgr));
+ status = dev_get_chnl_mgr(hdev_obj, &(disp_obj->chnl_mgr));
if (!status) {
(void)dev_get_intf_fxns(hdev_obj, &intf_fxns);
disp_obj->intf_fxns = intf_fxns;
chnl_attr_obj.event_obj = NULL;
ul_chnl_id = disp_attrs->chnl_offset + CHNLTORMSOFFSET;
status = (*intf_fxns->chnl_open) (&(disp_obj->chnl_to_dsp),
- disp_obj->hchnl_mgr,
+ disp_obj->chnl_mgr,
CHNL_MODETODSP, ul_chnl_id,
&chnl_attr_obj);
ul_chnl_id = disp_attrs->chnl_offset + CHNLFROMRMSOFFSET;
status =
(*intf_fxns->chnl_open) (&(disp_obj->chnl_from_dsp),
- disp_obj->hchnl_mgr,
+ disp_obj->chnl_mgr,
CHNL_MODEFROMDSP, ul_chnl_id,
&chnl_attr_obj);
}
DBC_REQUIRE(node_get_type(hnode) != NODE_DEVICE);
DBC_REQUIRE(node_env != NULL);
- status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type);
+ status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
if (status)
goto func_end;
DBC_REQUIRE(disp_obj);
DBC_REQUIRE(hnode != NULL);
- status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type);
+ status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
if (!status) {
DBC_REQUIRE(disp_obj);
DBC_REQUIRE(hnode != NULL);
- status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type);
+ status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
if (!status) {
#define ZLDLLNAME ""
struct mgr_object {
- struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */
+ struct dcd_manager *dcd_mgr; /* Proc/Node data manager */
};
/* ----------------------------------- Globals */
pmgr_obj = kzalloc(sizeof(struct mgr_object), GFP_KERNEL);
if (pmgr_obj) {
- status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->hdcd_mgr);
+ status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->dcd_mgr);
if (!status) {
/* If succeeded store the handle in the MGR Object */
if (drv_datap) {
if (!status) {
*mgr_obj = pmgr_obj;
} else {
- dcd_destroy_manager(pmgr_obj->hdcd_mgr);
+ dcd_destroy_manager(pmgr_obj->dcd_mgr);
kfree(pmgr_obj);
}
} else {
DBC_REQUIRE(hmgr_obj);
/* Free resources */
- if (hmgr_obj->hdcd_mgr)
- dcd_destroy_manager(hmgr_obj->hdcd_mgr);
+ if (hmgr_obj->dcd_mgr)
+ dcd_destroy_manager(hmgr_obj->dcd_mgr);
kfree(pmgr_obj);
/* Update the driver data with NULL for MGR Object */
break;
*pu_num_nodes = node_index;
if (node_id == (node_index - 1)) {
- status = dcd_get_object_def(pmgr_obj->hdcd_mgr,
+ status = dcd_get_object_def(pmgr_obj->dcd_mgr,
&node_uuid, DSP_DCDNODETYPE, &gen_obj);
if (status)
break;
if (proc_detect != false)
continue;
- status2 = dcd_get_object_def(pmgr_obj->hdcd_mgr,
+ status2 = dcd_get_object_def(pmgr_obj->dcd_mgr,
(struct dsp_uuid *)&temp_uuid,
DSP_DCDPROCESSORTYPE, &gen_obj);
if (!status2) {
*dcd_handle = (u32) NULL;
if (pmgr_obj) {
- *dcd_handle = (u32) pmgr_obj->hdcd_mgr;
+ *dcd_handle = (u32) pmgr_obj->dcd_mgr;
status = 0;
}
DBC_ENSURE((!status && *dcd_handle != (u32) NULL) ||
* Overlay loader object.
*/
struct nldr_object {
- struct dev_object *hdev_obj; /* Device object */
- struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */
+ struct dev_object *dev_obj; /* Device object */
+ struct dcd_manager *dcd_mgr; /* Proc/Node data manager */
struct dbll_tar_obj *dbll; /* The DBL loader */
struct dbll_library_obj *base_lib; /* Base image library */
struct rmm_target_obj *rmm; /* Remote memory manager for DSP */
u32 *seg_table; /* memtypes of dynamic memory segs
* indexed by segid
*/
- u16 us_dsp_mau_size; /* Size of DSP MAU */
- u16 us_dsp_word_size; /* Size of DSP word */
+ u16 dsp_mau_size; /* Size of DSP MAU */
+ u16 dsp_word_size; /* Size of DSP word */
};
/*
/* Allocate dynamic loader object */
nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
if (nldr_obj) {
- nldr_obj->hdev_obj = hdev_obj;
+ nldr_obj->dev_obj = hdev_obj;
/* warning, lazy status checking alert! */
dev_get_cod_mgr(hdev_obj, &cod_mgr);
if (cod_mgr) {
}
status = 0;
/* end lazy status checking */
- nldr_obj->us_dsp_mau_size = pattrs->us_dsp_mau_size;
- nldr_obj->us_dsp_word_size = pattrs->us_dsp_word_size;
+ nldr_obj->dsp_mau_size = pattrs->dsp_mau_size;
+ nldr_obj->dsp_word_size = pattrs->dsp_word_size;
nldr_obj->ldr_fxns = ldr_fxns;
if (!(nldr_obj->ldr_fxns.init_fxn()))
status = -ENOMEM;
}
/* Create the DCD Manager */
if (!status)
- status = dcd_create_manager(NULL, &nldr_obj->hdcd_mgr);
+ status = dcd_create_manager(NULL, &nldr_obj->dcd_mgr);
/* Get dynamic loading memory sections from base lib */
if (!status) {
&ul_len);
if (!status) {
psz_coff_buf =
- kzalloc(ul_len * nldr_obj->us_dsp_mau_size,
+ kzalloc(ul_len * nldr_obj->dsp_mau_size,
GFP_KERNEL);
if (!psz_coff_buf)
status = -ENOMEM;
DBC_ASSERT(!status);
/* First count number of overlay nodes */
status =
- dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
+ dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file,
add_ovly_node, (void *)nldr_obj);
/* Now build table of overlay nodes */
if (!status && nldr_obj->ovly_nodes > 0) {
nldr_obj->ovly_nodes, GFP_KERNEL);
/* Put overlay nodes in the table */
nldr_obj->ovly_nid = 0;
- status = dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
+ status = dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file,
add_ovly_node,
(void *)nldr_obj);
}
kfree(nldr_obj->seg_table);
- if (nldr_obj->hdcd_mgr)
- dcd_destroy_manager(nldr_obj->hdcd_mgr);
+ if (nldr_obj->dcd_mgr)
+ dcd_destroy_manager(nldr_obj->dcd_mgr);
/* Free overlay node information */
if (nldr_obj->ovly_table) {
goto func_end;
status =
- dcd_get_object_def(nldr_obj->hdcd_mgr, uuid_obj, obj_type,
+ dcd_get_object_def(nldr_obj->dcd_mgr, uuid_obj, obj_type,
&obj_def);
if (status)
goto func_end;
if (depth == 0) {
status =
dcd_get_library_name(nldr_node_obj->nldr_obj->
- hdcd_mgr, &uuid, psz_file_name,
+ dcd_mgr, &uuid, psz_file_name,
&dw_buf_size, phase,
nldr_node_obj->phase_split);
} else {
/* Dependent libraries are registered with a phase */
status =
dcd_get_library_name(nldr_node_obj->nldr_obj->
- hdcd_mgr, &uuid, psz_file_name,
+ dcd_mgr, &uuid, psz_file_name,
&dw_buf_size, NLDR_NOPHASE,
NULL);
}
depth++;
/* Get number of dependent libraries */
status =
- dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->hdcd_mgr,
+ dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->dcd_mgr,
&uuid, &nd_libs, &np_libs, phase);
}
DBC_ASSERT(nd_libs >= np_libs);
/* Get the dependent library UUIDs */
status =
dcd_get_dep_libs(nldr_node_obj->
- nldr_obj->hdcd_mgr, &uuid,
+ nldr_obj->dcd_mgr, &uuid,
nd_libs, dep_lib_uui_ds,
persistent_dep_libs,
phase);
rmm = nldr_obj->rmm;
/* Convert size to DSP words */
word_size =
- (size + nldr_obj->us_dsp_word_size -
- 1) / nldr_obj->us_dsp_word_size;
+ (size + nldr_obj->dsp_word_size -
+ 1) / nldr_obj->dsp_word_size;
/* Modify memory 'align' to account for DSP cache line size */
align = lcm(GEM_CACHE_LINE_SIZE, align);
dev_dbg(bridge, "%s: memory align to 0x%x\n", __func__, align);
/* Convert size to DSP words */
word_size =
- (size + nldr_obj->us_dsp_word_size -
- 1) / nldr_obj->us_dsp_word_size;
+ (size + nldr_obj->dsp_word_size -
+ 1) / nldr_obj->dsp_word_size;
if (rmm_free(rmm, space, dsp_address, word_size, reserve))
status = 0;
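/*
 * Both hunks above convert a byte count to whole DSP words with the usual
 * round-up division; e.g. 10 bytes with a 4-byte word gives 3 words. The
 * helper name is illustrative only.
 */
static u32 bytes_to_dsp_words(u32 size, u16 dsp_word_size)
{
	return (size + dsp_word_size - 1) / dsp_word_size;
}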
* ======== node_mgr ========
*/
struct node_mgr {
- struct dev_object *hdev_obj; /* Device object */
+ struct dev_object *dev_obj; /* Device object */
/* Function interface to Bridge driver */
struct bridge_drv_interface *intf_fxns;
- struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */
+ struct dcd_manager *dcd_mgr; /* Proc/Node data manager */
struct disp_object *disp_obj; /* Node dispatcher */
struct list_head node_list; /* List of all allocated nodes */
u32 num_nodes; /* Number of nodes in node_list */
*/
struct node_object {
struct list_head list_elem;
- struct node_mgr *hnode_mgr; /* The manager of this node */
+ struct node_mgr *node_mgr; /* The manager of this node */
struct proc_object *hprocessor; /* Back pointer to processor */
struct dsp_uuid node_uuid; /* Node's ID */
s32 prio; /* Node's current priority */
status = -ENOMEM;
goto func_end;
}
- pnode->hnode_mgr = hnode_mgr;
+ pnode->node_mgr = hnode_mgr;
/* This critical section protects get_node_props */
mutex_lock(&hnode_mgr->node_mgr_lock);
/* Get dsp_ndbprops from node database */
- status = get_node_props(hnode_mgr->hdcd_mgr, pnode, node_uuid,
+ status = get_node_props(hnode_mgr->dcd_mgr, pnode, node_uuid,
&(pnode->dcd_props));
if (status)
goto func_cont;
DBC_REQUIRE(refs > 0);
- if (!hnode || !hnode->hnode_mgr) {
+ if (!hnode || !hnode->node_mgr) {
status = -EFAULT;
} else {
- hnode_mgr = hnode->hnode_mgr;
+ hnode_mgr = hnode->node_mgr;
node_type = node_get_type(hnode);
if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
status = -EPERM;
/* The two nodes must be on the same processor */
if (node1 != (struct node_object *)DSP_HGPPNODE &&
node2 != (struct node_object *)DSP_HGPPNODE &&
- node1->hnode_mgr != node2->hnode_mgr)
+ node1->node_mgr != node2->node_mgr)
return -EPERM;
/* Cannot connect a node to itself */
return -EPERM; /* illegal stream mode */
if (node1_type != NODE_GPP) {
- hnode_mgr = node1->hnode_mgr;
+ hnode_mgr = node1->node_mgr;
} else {
DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
- hnode_mgr = node2->hnode_mgr;
+ hnode_mgr = node2->node_mgr;
}
/* Enter critical section */
/* create struct dsp_cbdata struct for PWR calls */
cb_data.cb_data = PWR_TIMEOUT;
node_type = node_get_type(hnode);
- hnode_mgr = hnode->hnode_mgr;
+ hnode_mgr = hnode->node_mgr;
intf_fxns = hnode_mgr->intf_fxns;
/* Get access to node dispatcher */
mutex_lock(&hnode_mgr->node_mgr_lock);
if (!node_mgr_obj)
return -ENOMEM;
- node_mgr_obj->hdev_obj = hdev_obj;
+ node_mgr_obj->dev_obj = hdev_obj;
node_mgr_obj->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
GFP_KERNEL);
dev_get_dev_type(hdev_obj, &dev_type);
- status = dcd_create_manager(sz_zl_file, &node_mgr_obj->hdcd_mgr);
+ status = dcd_create_manager(sz_zl_file, &node_mgr_obj->dcd_mgr);
if (status)
goto out_err;
nldr_attrs_obj.ovly = ovly;
nldr_attrs_obj.write = mem_write;
- nldr_attrs_obj.us_dsp_word_size = node_mgr_obj->udsp_word_size;
- nldr_attrs_obj.us_dsp_mau_size = node_mgr_obj->udsp_mau_size;
+ nldr_attrs_obj.dsp_word_size = node_mgr_obj->udsp_word_size;
+ nldr_attrs_obj.dsp_mau_size = node_mgr_obj->udsp_mau_size;
node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.init();
status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj,
hdev_obj,
}
/* create struct dsp_cbdata struct for PWR call */
cb_data.cb_data = PWR_TIMEOUT;
- hnode_mgr = pnode->hnode_mgr;
+ hnode_mgr = pnode->node_mgr;
hprocessor = pnode->hprocessor;
disp_obj = hnode_mgr->disp_obj;
node_type = node_get_type(pnode);
if (!hnode)
return -EFAULT;
- hnode_mgr = hnode->hnode_mgr;
+ hnode_mgr = hnode->node_mgr;
/* Enter hnode_mgr critical section (since we're accessing
* data that could be changed by node_change_priority() and
* node_connect()). */
status = -EPERM;
goto func_end;
}
- hnode_mgr = hnode->hnode_mgr;
+ hnode_mgr = hnode->node_mgr;
node_type = node_get_type(hnode);
if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
node_type != NODE_DAISSOCKET) {
/* Translate DSP byte addr to GPP Va. */
tmp_buf = cmm_xlator_translate(hnode->xlator,
(void *)(message->arg1 *
- hnode->hnode_mgr->
+ hnode->node_mgr->
udsp_word_size), CMM_DSPPA2PA);
if (tmp_buf != NULL) {
/* now convert this GPP Pa to Va */
if (tmp_buf != NULL) {
/* Adjust SM size in msg */
message->arg1 = (u32) tmp_buf;
- message->arg2 *= hnode->hnode_mgr->udsp_word_size;
+ message->arg2 *= hnode->node_mgr->udsp_word_size;
} else {
status = -ESRCH;
}
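/*
 * Editor's note (illustrative arithmetic, not part of the patch):
 * message->arg1 arrives as a DSP word address and arg2 as a length in
 * DSP words, while udsp_word_size is the size of one DSP word in GPP
 * bytes, so the GPP view is obtained by plain scaling:
 *
 *   gpp_pa_bytes = message->arg1 * hnode->node_mgr->udsp_word_size;
 *   len_bytes    = message->arg2 * hnode->node_mgr->udsp_word_size;
 */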
if (!hnode)
status = -EFAULT;
else
- *strm_man = hnode->hnode_mgr->strm_mgr_obj;
+ *strm_man = hnode->node_mgr->strm_mgr_obj;
return status;
}
NODE_SET_STATE(hnode, NODE_DONE);
hnode->exit_status = node_status;
if (hnode->loaded && hnode->phase_split) {
- (void)hnode->hnode_mgr->nldr_fxns.unload(hnode->
+ (void)hnode->node_mgr->nldr_fxns.unload(hnode->
nldr_node_obj,
NLDR_EXECUTE);
hnode->loaded = false;
status = -ENOSYS;
if (!status) {
- hnode_mgr = hnode->hnode_mgr;
+ hnode_mgr = hnode->node_mgr;
/* Enter critical section */
mutex_lock(&hnode_mgr->node_mgr_lock);
status = -EPERM;
goto func_end;
}
- hnode_mgr = hnode->hnode_mgr;
+ hnode_mgr = hnode->node_mgr;
node_type = node_get_type(hnode);
if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
node_type != NODE_DAISSOCKET)
CMM_VA2DSPPA);
if (tmp_buf != NULL) {
/* got translation, convert to MAUs in msg */
- if (hnode->hnode_mgr->udsp_word_size != 0) {
+ if (hnode->node_mgr->udsp_word_size != 0) {
new_msg.arg1 =
(u32) tmp_buf /
- hnode->hnode_mgr->udsp_word_size;
+ hnode->node_mgr->udsp_word_size;
/* MAUs */
- new_msg.arg2 /= hnode->hnode_mgr->
+ new_msg.arg2 /= hnode->node_mgr->
udsp_word_size;
} else {
pr_err("%s: udsp_word_size is zero!\n",
notify_type);
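/*
 * Editor's note (mirror of the scaling above, not new code): on the
 * GPP-to-DSP path the translated byte address and byte length are
 * divided back down to DSP units, which is exactly why the
 * udsp_word_size zero check precedes the division:
 *
 *   u32 wsz = hnode->node_mgr->udsp_word_size;   (sketch only)
 *   if (wsz != 0) {
 *           new_msg.arg1 = (u32) tmp_buf / wsz;
 *           new_msg.arg2 /= wsz;
 *   }
 */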
} else {
/* Send Message part of event mask to msg_ctrl */
- intf_fxns = hnode->hnode_mgr->intf_fxns;
+ intf_fxns = hnode->node_mgr->intf_fxns;
status = (*intf_fxns->msg_register_notify)
(hnode->msg_queue_obj,
event_mask & DSP_NODEMESSAGEREADY, notify_type,
if (status)
goto func_end;
- hnode_mgr = hnode->hnode_mgr;
+ hnode_mgr = hnode->node_mgr;
if (!hnode_mgr) {
status = -EFAULT;
goto func_end;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(pstatus != NULL);
- if (!hnode || !hnode->hnode_mgr) {
+ if (!hnode || !hnode->node_mgr) {
status = -EFAULT;
goto func_end;
}
status = proc_get_processor_id(pnode->hprocessor, &proc_id);
if (!status) {
- hnode_mgr = hnode->hnode_mgr;
+ hnode_mgr = hnode->node_mgr;
node_type = node_get_type(hnode);
if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
status = -EPERM;
* Here begins the simulation of
* the DSP exception.
*/
- dev_get_deh_mgr(hnode_mgr->hdev_obj, &hdeh_mgr);
+ dev_get_deh_mgr(hnode_mgr->dev_obj, &hdeh_mgr);
if (!hdeh_mgr)
goto func_cont;
int status;
if (!hnode)
goto func_end;
- hnode_mgr = hnode->hnode_mgr;
+ hnode_mgr = hnode->node_mgr;
if (!hnode_mgr)
goto func_end;
kfree(hnode->xlator);
kfree(hnode->nldr_node_obj);
hnode->nldr_node_obj = NULL;
- hnode->hnode_mgr = NULL;
+ hnode->node_mgr = NULL;
kfree(hnode);
hnode = NULL;
func_end:
if (hnode_mgr) {
/* Free resources */
- if (hnode_mgr->hdcd_mgr)
- dcd_destroy_manager(hnode_mgr->hdcd_mgr);
+ if (hnode_mgr->dcd_mgr)
+ dcd_destroy_manager(hnode_mgr->dcd_mgr);
/* Remove any elements remaining in lists */
list_for_each_entry_safe(hnode, tmp, &hnode_mgr->node_list,
struct node_strmdef *pstrm_def,
struct dsp_strmattr *pattrs)
{
- struct node_mgr *hnode_mgr = hnode->hnode_mgr;
+ struct node_mgr *hnode_mgr = hnode->node_mgr;
if (pattrs != NULL) {
pstrm_def->num_bufs = pattrs->num_bufs;
u32 phase)
{
char *pstr_fxn_name = NULL;
- struct node_mgr *hnode_mgr = hnode->hnode_mgr;
+ struct node_mgr *hnode_mgr = hnode->node_mgr;
int status = 0;
DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
node_get_type(hnode) == NODE_DAISSOCKET ||
dcd_node_props.pstr_delete_phase_fxn = NULL;
dcd_node_props.pstr_i_alg_name = NULL;
- status = dcd_get_object_def(hnode_mgr->hdcd_mgr,
+ status = dcd_get_object_def(hnode_mgr->dcd_mgr,
(struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
(struct dcd_genericobj *)&dcd_node_props);
static int get_rms_fxns(struct node_mgr *hnode_mgr)
{
s32 i;
- struct dev_object *dev_obj = hnode_mgr->hdev_obj;
+ struct dev_object *dev_obj = hnode_mgr->dev_obj;
int status = 0;
static char *psz_fxns[NUMRMSFXNS] = {
DBC_REQUIRE(hnode);
- hnode_mgr = hnode->hnode_mgr;
+ hnode_mgr = hnode->node_mgr;
ul_size = ul_num_bytes / hnode_mgr->udsp_word_size;
ul_timeout = hnode->utimeout;
/* Call new MemCopy function */
intf_fxns = hnode_mgr->intf_fxns;
- status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
+ status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
if (!status) {
status =
(*intf_fxns->brd_mem_copy) (hbridge_context,
DBC_REQUIRE(hnode);
DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA);
- hnode_mgr = hnode->hnode_mgr;
+ hnode_mgr = hnode->node_mgr;
ul_timeout = hnode->utimeout;
mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;
/* Call new MemWrite function */
intf_fxns = hnode_mgr->intf_fxns;
- status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
+ status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
status = (*intf_fxns->brd_mem_write) (hbridge_context, pbuf,
dsp_add, ul_num_bytes, mem_sect_type);
/* The proc_object structure. */
struct proc_object {
struct list_head link; /* Link to next proc_object */
- struct dev_object *hdev_obj; /* Device this PROC represents */
+ struct dev_object *dev_obj; /* Device this PROC represents */
u32 process; /* Process owning this Processor */
- struct mgr_object *hmgr_obj; /* Manager Object Handle */
+ struct mgr_object *mgr_obj; /* Manager Object Handle */
u32 attach_count; /* Processor attach count */
u32 processor_id; /* Processor number */
u32 utimeout; /* Time out count */
enum dsp_procstate proc_state; /* Processor state */
- u32 ul_unit; /* DDSP unit number */
+ u32 unit; /* DDSP unit number */
bool is_already_attached; /*
* True if the Device below has
* GPP Client attached
*/
struct ntfy_object *ntfy_obj; /* Manages notifications */
/* Bridge Context Handle */
- struct bridge_dev_context *hbridge_context;
+ struct bridge_dev_context *bridge_context;
/* Function interface to Bridge driver */
struct bridge_drv_interface *intf_fxns;
- char *psz_last_coff;
+ char *last_coff;
struct list_head proc_list;
};
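/*
 * Editor's note: proc_object follows the same mechanical cleanup as
 * every other structure in this patch -- Hungarian-style prefixes are
 * dropped (hdev_obj -> dev_obj, hmgr_obj -> mgr_obj, hbridge_context ->
 * bridge_context, psz_last_coff -> last_coff, ul_unit -> unit) while
 * field types and semantics stay untouched.
 */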
status = -ENOMEM;
goto func_end;
}
- p_proc_object->hdev_obj = hdev_obj;
- p_proc_object->hmgr_obj = hmgr_obj;
+ p_proc_object->dev_obj = hdev_obj;
+ p_proc_object->mgr_obj = hmgr_obj;
p_proc_object->processor_id = dev_type;
/* Store TGID instead of process handle */
p_proc_object->process = current->tgid;
status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
if (!status) {
status = dev_get_bridge_context(hdev_obj,
- &p_proc_object->hbridge_context);
+ &p_proc_object->bridge_context);
if (status)
kfree(p_proc_object);
} else
* Return handle to this Processor Object:
* Find out if the Device is already attached to a
* Processor. If so, return AlreadyAttached status */
- status = dev_insert_proc_object(p_proc_object->hdev_obj,
+ status = dev_insert_proc_object(p_proc_object->dev_obj,
(u32) p_proc_object,
&p_proc_object->
is_already_attached);
status = -ENOMEM;
goto func_end;
}
- p_proc_object->hdev_obj = hdev_obj;
- p_proc_object->hmgr_obj = hmgr_obj;
+ p_proc_object->dev_obj = hdev_obj;
+ p_proc_object->mgr_obj = hmgr_obj;
status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
if (!status)
status = dev_get_bridge_context(hdev_obj,
- &p_proc_object->hbridge_context);
+ &p_proc_object->bridge_context);
if (status)
goto func_cont;
if (!status)
status = proc_start(p_proc_object);
}
- kfree(p_proc_object->psz_last_coff);
- p_proc_object->psz_last_coff = NULL;
+ kfree(p_proc_object->last_coff);
+ p_proc_object->last_coff = NULL;
func_cont:
kfree(p_proc_object);
func_end:
status = pwr_wake_dsp(timeout);
} else
if (!((*p_proc_object->intf_fxns->dev_cntrl)
- (p_proc_object->hbridge_context, dw_cmd,
+ (p_proc_object->bridge_context, dw_cmd,
arg))) {
status = 0;
} else {
kfree(p_proc_object->ntfy_obj);
}
- kfree(p_proc_object->psz_last_coff);
- p_proc_object->psz_last_coff = NULL;
+ kfree(p_proc_object->last_coff);
+ p_proc_object->last_coff = NULL;
/* Remove the Proc from the DEV List */
- (void)dev_remove_proc_object(p_proc_object->hdev_obj,
+ (void)dev_remove_proc_object(p_proc_object->dev_obj,
(u32) p_proc_object);
/* Free the Processor Object */
kfree(p_proc_object);
DBC_REQUIRE(pu_allocated != NULL);
if (p_proc_object) {
- if (!(dev_get_node_manager(p_proc_object->hdev_obj,
+ if (!(dev_get_node_manager(p_proc_object->dev_obj,
&hnode_mgr))) {
if (hnode_mgr) {
status = node_enum_nodes(hnode_mgr, node_tab,
case DSP_RESOURCE_DYNSARAM:
case DSP_RESOURCE_DYNEXTERNAL:
case DSP_RESOURCE_DYNSRAM:
- status = dev_get_node_manager(p_proc_object->hdev_obj,
+ status = dev_get_node_manager(p_proc_object->dev_obj,
&hnode_mgr);
if (!hnode_mgr) {
status = -EFAULT;
}
break;
case DSP_RESOURCE_PROCLOAD:
- status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
+ status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
if (hio_mgr)
status =
p_proc_object->intf_fxns->
DBC_REQUIRE(device_obj != NULL);
if (p_proc_object) {
- *device_obj = p_proc_object->hdev_obj;
+ *device_obj = p_proc_object->dev_obj;
status = 0;
} else {
*device_obj = NULL;
if (p_proc_object) {
/* First, retrieve BRD state information */
status = (*p_proc_object->intf_fxns->brd_status)
- (p_proc_object->hbridge_context, &brd_status);
+ (p_proc_object->bridge_context, &brd_status);
if (!status) {
switch (brd_status) {
case BRD_STOPPED:
status = -EFAULT;
goto func_end;
}
- dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
+ dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
if (!cod_mgr) {
status = -EPERM;
goto func_end;
prepend_envp(new_envp, (char **)user_envp,
envp_elems, cnew_envp, sz_proc_id);
/* Get the DCD Handle */
- status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
+ status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
(u32 *) &hdcd_handle);
if (!status) {
/* Before proceeding with new load,
* check if a previously registered COFF exists.
* If yes, unregister nodes in previously
* registered COFF. If any error occurred,
* set previously registered COFF to NULL. */
- if (p_proc_object->psz_last_coff != NULL) {
+ if (p_proc_object->last_coff != NULL) {
status =
dcd_auto_unregister(hdcd_handle,
p_proc_object->
- psz_last_coff);
+ last_coff);
/* Regardless of auto unregister status,
* free previously allocated
* memory. */
- kfree(p_proc_object->psz_last_coff);
- p_proc_object->psz_last_coff = NULL;
+ kfree(p_proc_object->last_coff);
+ p_proc_object->last_coff = NULL;
}
}
/* On success, do cod_open_base() */
if (!status) {
/* Auto-register data base */
/* Get the DCD Handle */
- status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
+ status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
(u32 *) &hdcd_handle);
if (!status) {
/* Auto register nodes in specified COFF
if (status) {
status = -EPERM;
} else {
- DBC_ASSERT(p_proc_object->psz_last_coff ==
+ DBC_ASSERT(p_proc_object->last_coff ==
NULL);
- /* Allocate memory for pszLastCoff */
+ /* Allocate memory for last_coff */
- p_proc_object->psz_last_coff =
+ p_proc_object->last_coff =
kzalloc((strlen(user_args[0]) +
1), GFP_KERNEL);
/* If memory allocated, save COFF file name */
- if (p_proc_object->psz_last_coff) {
- strncpy(p_proc_object->psz_last_coff,
+ if (p_proc_object->last_coff) {
+ strncpy(p_proc_object->last_coff,
(char *)user_args[0],
(strlen((char *)user_args[0]) +
1));
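/*
 * Editor's note (alternative sketch, not what the patch does): the
 * kzalloc + strncpy pair above copies user_args[0] including its
 * terminating NUL, so the stock helper would do the same job:
 *
 *   p_proc_object->last_coff = kstrdup((char *)user_args[0],
 *                                      GFP_KERNEL);
 *
 * with identical NULL-on-allocation-failure behavior.
 */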
if (!status) {
/* Create the message manager. This must be done
* before calling the IOOnLoaded function. */
- dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
+ dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
if (!hmsg_mgr) {
- status = msg_create(&hmsg_mgr, p_proc_object->hdev_obj,
+ status = msg_create(&hmsg_mgr, p_proc_object->dev_obj,
(msg_onexit) node_on_exit);
DBC_ASSERT(!status);
- dev_set_msg_mgr(p_proc_object->hdev_obj, hmsg_mgr);
+ dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr);
}
}
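/*
 * Editor's note (reasoning, assumption flagged): io_on_loaded() is
 * assumed to look the message manager up through the device object
 * while wiring the shared-memory I/O state, so msg_create() and
 * dev_set_msg_mgr() must run first; creating it only when absent also
 * lets repeated loads reuse the existing manager.
 */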
if (!status) {
/* Set the Device object's message manager */
- status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
+ status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
if (hio_mgr)
status = (*p_proc_object->intf_fxns->io_on_loaded)
(hio_mgr);
#endif
status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
dev_brd_write_fxn,
- p_proc_object->hdev_obj, NULL);
+ p_proc_object->dev_obj, NULL);
if (status) {
if (status == -EBADF) {
dev_dbg(bridge, "%s: Failure to Load the EXE\n",
if (!status) {
/* Update the Processor status to loaded */
status = (*p_proc_object->intf_fxns->brd_set_state)
- (p_proc_object->hbridge_context, BRD_LOADED);
+ (p_proc_object->bridge_context, BRD_LOADED);
if (!status) {
p_proc_object->proc_state = PROC_LOADED;
if (p_proc_object->ntfy_obj)
/* Reset DMM structs and add an initial free chunk */
if (!status) {
status =
- dev_get_dmm_mgr(p_proc_object->hdev_obj,
+ dev_get_dmm_mgr(p_proc_object->dev_obj,
&dmm_mgr);
if (dmm_mgr) {
/* Set dw_ext_end to DMM START u8
user_args[0] = pargv0;
if (!status) {
if (!((*p_proc_object->intf_fxns->brd_status)
- (p_proc_object->hbridge_context, &brd_state))) {
+ (p_proc_object->bridge_context, &brd_state))) {
pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
kfree(drv_datap->base_img);
drv_datap->base_img = kmalloc(strlen(pargv0) + 1,
status = -ENOMEM;
else
status = (*p_proc_object->intf_fxns->brd_mem_map)
- (p_proc_object->hbridge_context, pa_align, va_align,
+ (p_proc_object->bridge_context, pa_align, va_align,
size_align, ul_map_attr, map_obj->pages);
}
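/*
 * Editor's sketch (helper names assumed, not from this patch): the
 * _align values handed to brd_mem_map() are page-granular, on the
 * order of:
 *
 *   pa_align   = round_down(pa, PG_SIZE4K);
 *   va_align   = round_down(va, PG_SIZE4K);
 *   size_align = round_up(size + (pa - pa_align), PG_SIZE4K);
 *
 * so the DSP MMU always maps whole 4 KiB pages covering the request.
 */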
if (!status) {
*/
if ((event_mask == 0) && status) {
status =
- dev_get_deh_mgr(p_proc_object->hdev_obj,
+ dev_get_deh_mgr(p_proc_object->dev_obj,
&hdeh_mgr);
status =
bridge_deh_register_notify(hdeh_mgr,
hnotification);
}
} else {
- status = dev_get_deh_mgr(p_proc_object->hdev_obj,
+ status = dev_get_deh_mgr(p_proc_object->dev_obj,
&hdeh_mgr);
status =
bridge_deh_register_notify(hdeh_mgr,
status = -EBADR;
goto func_end;
}
- status = dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
+ status = dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
if (!cod_mgr) {
status = -EFAULT;
goto func_cont;
goto func_cont;
status = (*p_proc_object->intf_fxns->brd_start)
- (p_proc_object->hbridge_context, dw_dsp_addr);
+ (p_proc_object->bridge_context, dw_dsp_addr);
if (status)
goto func_cont;
/* Call dev_create2 */
- status = dev_create2(p_proc_object->hdev_obj);
+ status = dev_create2(p_proc_object->dev_obj);
if (!status) {
p_proc_object->proc_state = PROC_RUNNING;
/* Deep sleep switches off the peripheral clocks.
/* Failed to Create Node Manager and DISP Object
* Stop the Processor from running. Put it in STOPPED State */
(void)(*p_proc_object->intf_fxns->
- brd_stop) (p_proc_object->hbridge_context);
+ brd_stop) (p_proc_object->bridge_context);
p_proc_object->proc_state = PROC_STOPPED;
}
func_cont:
if (!status) {
if (!((*p_proc_object->intf_fxns->brd_status)
- (p_proc_object->hbridge_context, &brd_state))) {
+ (p_proc_object->bridge_context, &brd_state))) {
pr_info("%s: dsp in running state\n", __func__);
DBC_ASSERT(brd_state != BRD_HIBERNATION);
}
goto func_end;
}
/* check if there are any running nodes */
- status = dev_get_node_manager(p_proc_object->hdev_obj, &hnode_mgr);
+ status = dev_get_node_manager(p_proc_object->dev_obj, &hnode_mgr);
if (!status && hnode_mgr) {
status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
&num_nodes, &nodes_allocated);
/* It is OK to stop a device that doesn't have nodes or hasn't been started */
status =
(*p_proc_object->intf_fxns->
- brd_stop) (p_proc_object->hbridge_context);
+ brd_stop) (p_proc_object->bridge_context);
if (!status) {
dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
p_proc_object->proc_state = PROC_STOPPED;
/* Destroy the Node Manager and the msg_ctrl Manager */
- if (!(dev_destroy2(p_proc_object->hdev_obj))) {
+ if (!(dev_destroy2(p_proc_object->dev_obj))) {
/* Destroy the msg_ctrl by calling msg_delete */
- dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
+ dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
if (hmsg_mgr) {
msg_delete(hmsg_mgr);
- dev_set_msg_mgr(p_proc_object->hdev_obj, NULL);
+ dev_set_msg_mgr(p_proc_object->dev_obj, NULL);
}
if (!((*p_proc_object->
intf_fxns->brd_status) (p_proc_object->
- hbridge_context,
+ bridge_context,
&brd_state)))
DBC_ASSERT(brd_state == BRD_STOPPED);
}
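/*
 * Editor's summary of the stop path above (ordering condensed from the
 * surrounding fragments, no new behavior):
 *
 *   node_enum_nodes(...);                  refuse/flag live nodes
 *   intf_fxns->brd_stop(bridge_context);   halt the DSP
 *   dev_destroy2(dev_obj);                 node/disp managers go away
 *   msg_delete(hmsg_mgr);                  then msg_ctrl
 *   dev_set_msg_mgr(dev_obj, NULL);
 *   DBC_ASSERT(brd_state == BRD_STOPPED);
 */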
/* Remove mapping from the page tables. */
if (!status) {
status = (*p_proc_object->intf_fxns->brd_mem_un_map)
- (p_proc_object->hbridge_context, va_align, size_align);
+ (p_proc_object->bridge_context, va_align, size_align);
}
mutex_unlock(&proc_lock);
/* This is needed only when the Device is loaded while it is
* already 'ACTIVE' */
/* Destroy the Node Manager and the msg_ctrl Manager */
- if (!dev_destroy2(proc_obj->hdev_obj)) {
+ if (!dev_destroy2(proc_obj->dev_obj)) {
/* Destroy the msg_ctrl by calling msg_delete */
- dev_get_msg_mgr(proc_obj->hdev_obj, &hmsg_mgr);
+ dev_get_msg_mgr(proc_obj->dev_obj, &hmsg_mgr);
if (hmsg_mgr) {
msg_delete(hmsg_mgr);
- dev_set_msg_mgr(proc_obj->hdev_obj, NULL);
+ dev_set_msg_mgr(proc_obj->dev_obj, NULL);
}
}
/* Place the Board in the Monitor State */
if (!((*proc_obj->intf_fxns->brd_monitor)
- (proc_obj->hbridge_context))) {
+ (proc_obj->bridge_context))) {
status = 0;
if (!((*proc_obj->intf_fxns->brd_status)
- (proc_obj->hbridge_context, &brd_state)))
+ (proc_obj->bridge_context, &brd_state)))
DBC_ASSERT(brd_state == BRD_IDLE);
}
goto func_end;
}
- dev_notify_clients(p_proc_object->hdev_obj, events);
+ dev_notify_clients(p_proc_object->dev_obj, events);
func_end:
return status;
}
- /* ul_size */
- mem_stat_buf->ul_size = target->seg_tab[segid].length;
+ /* size */
+ mem_stat_buf->size = target->seg_tab[segid].length;
/* num_free_blocks */
mem_stat_buf->num_free_blocks = free_blocks;
- /* ul_total_free_size */
- mem_stat_buf->ul_total_free_size = total_free_size;
+ /* total_free_size */
+ mem_stat_buf->total_free_size = total_free_size;
/* len_max_free_block */
mem_stat_buf->len_max_free_block = max_free_size;
*/
struct strm_mgr {
struct dev_object *dev_obj; /* Device for this processor */
- struct chnl_mgr *hchnl_mgr; /* Channel manager */
+ struct chnl_mgr *chnl_mgr; /* Channel manager */
/* Function interface to Bridge driver */
struct bridge_drv_interface *intf_fxns;
};
/* Get Channel manager and Bridge function interface */
if (!status) {
- status = dev_get_chnl_mgr(dev_obj, &(strm_mgr_obj->hchnl_mgr));
+ status = dev_get_chnl_mgr(dev_obj, &(strm_mgr_obj->chnl_mgr));
if (!status) {
(void)dev_get_intf_fxns(dev_obj,
&(strm_mgr_obj->intf_fxns));
if (status)
goto func_cont;
- if ((pattr->virt_base == NULL) || !(pattr->ul_virt_size > 0))
+ if ((pattr->virt_base == NULL) || !(pattr->virt_size > 0))
goto func_cont;
/* No System DMA */
/* Set translators Virt Addr attributes */
status = cmm_xlator_info(strm_obj->xlator,
(u8 **) &pattr->virt_base,
- pattr->ul_virt_size,
+ pattr->virt_size,
strm_obj->segment_id, true);
}
}
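/*
 * Editor's note (grounded in the call below): each stream is backed by
 * exactly one channel -- strm_open() picks CHNL_MODETODSP or
 * CHNL_MODEFROMDSP from the stream direction and opens the channel
 * through the manager now cached in strm_mgr_obj->chnl_mgr.
 */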
CHNL_MODETODSP : CHNL_MODEFROMDSP;
intf_fxns = strm_mgr_obj->intf_fxns;
status = (*intf_fxns->chnl_open) (&(strm_obj->chnl_obj),
- strm_mgr_obj->hchnl_mgr,
+ strm_mgr_obj->chnl_mgr,
chnl_mode, ul_chnl_id,
&chnl_attr_obj);
if (status) {
* We got a status that's not returnable.
* Assert that we got something we were
* expecting (-EFAULT isn't acceptable,
- * strm_mgr_obj->hchnl_mgr better be valid or we
+ * strm_mgr_obj->chnl_mgr better be valid or we
* assert here), and then return -EPERM.
*/
DBC_ASSERT(status == -ENOSR ||