* purpose:
* Flushes all the outstanding data requests on a channel.
*/
-int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 dwTimeOut)
+int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
{
int status = 0;
struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
struct chnl_ioc chnl_ioc_obj;
/* Check args: */
if (pchnl) {
- if ((dwTimeOut == CHNL_IOCNOWAIT)
+ if ((timeout == CHNL_IOCNOWAIT)
&& CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
status = -EINVAL;
} else {
while (!LST_IS_EMPTY(pchnl->pio_requests) &&
DSP_SUCCEEDED(status)) {
status = bridge_chnl_get_ioc(chnl_obj,
- dwTimeOut, &chnl_ioc_obj);
+ timeout, &chnl_ioc_obj);
if (DSP_FAILED(status))
continue;
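/*
 * Standalone sketch (not driver code): the flush loop above simply reaps
 * completions until the request list drains or a reap fails. The toy counter,
 * reap_one() and its -EREMOTEIO result below are hypothetical stand-ins for
 * bridge_chnl_get_ioc() and the channel request/completion lists.
 */
#include <stdio.h>

#define EREMOTEIO 121

static int pending = 3;                 /* pretend three requests are queued */

static int reap_one(int timeout)        /* stand-in for bridge_chnl_get_ioc() */
{
	(void)timeout;
	if (pending == 0)
		return -EREMOTEIO;
	pending--;                      /* one I/O completion consumed */
	return 0;
}

int main(void)
{
	int status = 0;

	/* Same shape as the flush loop: drain until empty or an error. */
	while (pending > 0 && status == 0)
		status = reap_one(100);

	printf("drained, status=%d, pending=%d\n", status, pending);
	return 0;
}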
* I/O request.
* Note: Ensures Channel Invariant (see notes above).
*/
-int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 dwTimeOut,
+int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
OUT struct chnl_ioc *pIOC)
{
int status = 0;
/* Check args: */
if (!pIOC || !pchnl) {
status = -EFAULT;
- } else if (dwTimeOut == CHNL_IOCNOWAIT) {
+ } else if (timeout == CHNL_IOCNOWAIT) {
if (LST_IS_EMPTY(pchnl->pio_completions))
status = -EREMOTEIO;
goto func_end;
ioc.status = CHNL_IOCSTATCOMPLETE;
- if (dwTimeOut !=
+ if (timeout !=
CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
- if (dwTimeOut == CHNL_IOCINFINITE)
- dwTimeOut = SYNC_INFINITE;
+ if (timeout == CHNL_IOCINFINITE)
+ timeout = SYNC_INFINITE;
- stat_sync = sync_wait_on_event(pchnl->sync_event, dwTimeOut);
+ stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
if (stat_sync == -ETIME) {
/* No response from DSP */
ioc.status |= CHNL_IOCSTATTIMEOUT;
* value may be non-zero, we still have to set the event.
* Therefore, this optimization is taken out.
*
- * if (dwTimeOut == CHNL_IOCNOWAIT) {
+ * if (timeout == CHNL_IOCNOWAIT) {
* ... ensure event is set..
* sync_set_event(pchnl->sync_event);
* } */
* ======== bridge_chnl_idle ========
* Idles a particular channel.
*/
-int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 dwTimeOut,
- bool fFlush)
+int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
+ bool flush_data)
{
s8 chnl_mode;
struct chnl_mgr *chnl_mgr_obj;
chnl_mode = chnl_obj->chnl_mode;
chnl_mgr_obj = chnl_obj->chnl_mgr_obj;
- if (CHNL_IS_OUTPUT(chnl_mode) && !fFlush) {
+ if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) {
/* Wait for IO completions, up to the specified timeout: */
- status = bridge_chnl_flush_io(chnl_obj, dwTimeOut);
+ status = bridge_chnl_flush_io(chnl_obj, timeout);
} else {
status = bridge_chnl_cancel_io(chnl_obj);
static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
- struct chnl_object *pchnl, u32 dwMask);
+ struct chnl_object *pchnl, u32 mask);
static u32 read_data(struct bridge_dev_context *hDevContext, void *dest,
void *pSrc, u32 usize);
static u32 write_data(struct bridge_dev_context *hDevContext, void *dest,
* IO_Dispatch()), so just start searching from the current channel id.
*/
static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
- struct chnl_object *pchnl, u32 dwMask)
+ struct chnl_object *pchnl, u32 mask)
{
u32 ret = OUTPUTNOTREADY;
u32 id, start_id;
id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
if (id >= CHNL_MAXCHANNELS)
goto func_end;
- if (dwMask) {
+ if (mask) {
shift = (1 << id);
start_id = id;
do {
- if (dwMask & shift) {
+ if (mask & shift) {
ret = id;
if (pchnl == NULL)
chnl_mgr_obj->dw_last_output = id;
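/*
 * Standalone sketch (not the driver): find_ready_output() above is a
 * round-robin scan of a ready-bit mask that resumes from where the previous
 * search left off, so one busy channel cannot starve the others. MAX_CH and
 * the NOT_READY sentinel are illustrative names only.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_CH    32u
#define NOT_READY 0xFFFFFFFFu

static uint32_t last_output;            /* index of last serviced channel */

static uint32_t find_ready(uint32_t mask)
{
	uint32_t id = (last_output + 1u) % MAX_CH;   /* resume after last hit */
	uint32_t start = id;

	if (!mask)
		return NOT_READY;

	do {
		if (mask & (1u << id)) {             /* this channel is ready */
			last_output = id;
			return id;
		}
		id = (id + 1u) % MAX_CH;             /* wrap around the mask */
	} while (id != start);

	return NOT_READY;
}

int main(void)
{
	last_output = 5;
	/* Bits 1 and 5 set: channel 1 is picked, not 5 again. */
	printf("next ready: %u\n", (unsigned)find_ready(0x00000022u));
	return 0;
}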
static int bridge_brd_monitor(struct bridge_dev_context *dev_context);
static int bridge_brd_read(struct bridge_dev_context *dev_context,
OUT u8 *pbHostBuf,
- u32 dwDSPAddr, u32 ul_num_bytes,
+ u32 dsp_addr, u32 ul_num_bytes,
u32 ulMemType);
static int bridge_brd_start(struct bridge_dev_context *dev_context,
- u32 dwDSPAddr);
+ u32 dsp_addr);
static int bridge_brd_status(struct bridge_dev_context *dev_context,
int *pdwState);
static int bridge_brd_stop(struct bridge_dev_context *dev_context);
static int bridge_brd_write(struct bridge_dev_context *dev_context,
IN u8 *pbHostBuf,
- u32 dwDSPAddr, u32 ul_num_bytes,
+ u32 dsp_addr, u32 ul_num_bytes,
u32 ulMemType);
static int bridge_brd_set_state(struct bridge_dev_context *hDevContext,
u32 ulBrdState);
u32 ulDspDestAddr, u32 ulDspSrcAddr,
u32 ul_num_bytes, u32 ulMemType);
static int bridge_brd_mem_write(struct bridge_dev_context *dev_context,
- IN u8 *pbHostBuf, u32 dwDSPAddr,
+ IN u8 *pbHostBuf, u32 dsp_addr,
u32 ul_num_bytes, u32 ulMemType);
static int bridge_brd_mem_map(struct bridge_dev_context *hDevContext,
u32 ul_mpu_addr, u32 ulVirtAddr,
* Reads buffers for DSP memory.
*/
static int bridge_brd_read(struct bridge_dev_context *hDevContext,
- OUT u8 *pbHostBuf, u32 dwDSPAddr,
+ OUT u8 *pbHostBuf, u32 dsp_addr,
u32 ul_num_bytes, u32 ulMemType)
{
int status = 0;
u32 offset;
u32 dsp_base_addr = hDevContext->dw_dsp_base_addr;
- if (dwDSPAddr < dev_context->dw_dsp_start_add) {
+ if (dsp_addr < dev_context->dw_dsp_start_add) {
status = -EPERM;
return status;
}
/* change here to account for the 3 bands of the DSP internal memory */
- if ((dwDSPAddr - dev_context->dw_dsp_start_add) <
+ if ((dsp_addr - dev_context->dw_dsp_start_add) <
dev_context->dw_internal_size) {
- offset = dwDSPAddr - dev_context->dw_dsp_start_add;
+ offset = dsp_addr - dev_context->dw_dsp_start_add;
} else {
- status = read_ext_dsp_data(dev_context, pbHostBuf, dwDSPAddr,
+ status = read_ext_dsp_data(dev_context, pbHostBuf, dsp_addr,
ul_num_bytes, ulMemType);
return status;
}
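/*
 * Standalone sketch (not driver code): bridge_brd_read()/bridge_brd_write()
 * route a request by where the DSP address falls. Addresses below the DSP
 * start are rejected, addresses whose offset lands inside the internal-memory
 * window are served directly, and everything else goes to the external-memory
 * path. The constants and the enum below are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define EPERM 1

enum route { ROUTE_INTERNAL, ROUTE_EXTERNAL };

static int route_dsp_access(uint32_t dsp_addr, uint32_t dsp_start,
			    uint32_t internal_size, enum route *out)
{
	if (dsp_addr < dsp_start)
		return -EPERM;                       /* below DSP memory */

	if ((dsp_addr - dsp_start) < internal_size)
		*out = ROUTE_INTERNAL;               /* offset inside window */
	else
		*out = ROUTE_EXTERNAL;               /* handled by ext path */
	return 0;
}

int main(void)
{
	enum route r;

	if (!route_dsp_access(0x20001000u, 0x20000000u, 0x10000u, &r))
		printf("route=%s\n", r == ROUTE_INTERNAL ? "internal" : "external");
	return 0;
}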
* b) DSP_RST2 is released.
*/
static int bridge_brd_start(struct bridge_dev_context *hDevContext,
- u32 dwDSPAddr)
+ u32 dsp_addr)
{
int status = 0;
struct bridge_dev_context *dev_context = hDevContext;
OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
OMAP2_RM_RSTCTRL);
/* Mask address with 1K for compatibility */
- __raw_writel(dwDSPAddr & OMAP3_IVA2_BOOTADDR_MASK,
+ __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
OMAP343X_CTRL_REGADDR(
OMAP343X_CONTROL_IVA2_BOOTADDR));
/*
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
- dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dwDSPAddr);
+ dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr);
if (dsp_debug)
while (*((volatile u16 *)dw_sync_addr))
;;
* Copies the buffers to DSP internal or external memory.
*/
static int bridge_brd_write(struct bridge_dev_context *hDevContext,
- IN u8 *pbHostBuf, u32 dwDSPAddr,
+ IN u8 *pbHostBuf, u32 dsp_addr,
u32 ul_num_bytes, u32 ulMemType)
{
int status = 0;
struct bridge_dev_context *dev_context = hDevContext;
- if (dwDSPAddr < dev_context->dw_dsp_start_add) {
+ if (dsp_addr < dev_context->dw_dsp_start_add) {
status = -EPERM;
return status;
}
- if ((dwDSPAddr - dev_context->dw_dsp_start_add) <
+ if ((dsp_addr - dev_context->dw_dsp_start_add) <
dev_context->dw_internal_size) {
- status = write_dsp_data(hDevContext, pbHostBuf, dwDSPAddr,
+ status = write_dsp_data(hDevContext, pbHostBuf, dsp_addr,
ul_num_bytes, ulMemType);
} else {
- status = write_ext_dsp_data(dev_context, pbHostBuf, dwDSPAddr,
+ status = write_ext_dsp_data(dev_context, pbHostBuf, dsp_addr,
ul_num_bytes, ulMemType, false);
}
/* Mem Write does not halt the DSP to write unlike bridge_brd_write */
static int bridge_brd_mem_write(struct bridge_dev_context *hDevContext,
- IN u8 *pbHostBuf, u32 dwDSPAddr,
+ IN u8 *pbHostBuf, u32 dsp_addr,
u32 ul_num_bytes, u32 ulMemType)
{
int status = 0;
while (ul_remain_bytes > 0 && DSP_SUCCEEDED(status)) {
ul_bytes =
ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
- if (dwDSPAddr < (dev_context->dw_dsp_start_add +
+ if (dsp_addr < (dev_context->dw_dsp_start_add +
dev_context->dw_internal_size)) {
status =
- write_dsp_data(hDevContext, pbHostBuf, dwDSPAddr,
+ write_dsp_data(hDevContext, pbHostBuf, dsp_addr,
ul_bytes, ulMemType);
} else {
status = write_ext_dsp_data(hDevContext, pbHostBuf,
- dwDSPAddr, ul_bytes,
+ dsp_addr, ul_bytes,
ulMemType, true);
}
ul_remain_bytes -= ul_bytes;
- dwDSPAddr += ul_bytes;
+ dsp_addr += ul_bytes;
pbHostBuf = pbHostBuf + ul_bytes;
}
return status;
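/*
 * Standalone sketch (not driver code): bridge_brd_mem_write() above splits a
 * transfer into BUFFERSIZE-sized chunks, advancing the DSP address and the
 * host pointer after each piece. The 512-byte chunk size and copy_chunk()
 * are illustrative stand-ins for BUFFERSIZE and write_dsp_data()/
 * write_ext_dsp_data().
 */
#include <stdint.h>
#include <stdio.h>

#define CHUNK 512u

static void copy_chunk(uint32_t dsp_addr, const uint8_t *src, uint32_t n)
{
	printf("copy %u bytes to 0x%08x\n", (unsigned)n, (unsigned)dsp_addr);
	(void)src;
}

static void chunked_write(uint32_t dsp_addr, const uint8_t *host, uint32_t total)
{
	while (total > 0) {
		uint32_t n = total > CHUNK ? CHUNK : total;  /* cap each piece */

		copy_chunk(dsp_addr, host, n);
		total    -= n;                               /* bytes left */
		dsp_addr += n;                               /* advance DSP side */
		host     += n;                               /* advance host side */
	}
}

int main(void)
{
	static uint8_t buf[1200];

	chunked_write(0x80000000u, buf, sizeof(buf));        /* 512+512+176 */
	return 0;
}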
* Copies DSP external memory buffers to the host side buffers.
*/
int read_ext_dsp_data(struct bridge_dev_context *hDevContext,
- OUT u8 *pbHostBuf, u32 dwDSPAddr,
+ OUT u8 *pbHostBuf, u32 dsp_addr,
u32 ul_num_bytes, u32 ulMemType)
{
int status = 0;
DBC_ASSERT(ul_trace_sec_end != 0);
if (DSP_SUCCEEDED(status)) {
- if ((dwDSPAddr <= ul_trace_sec_end) &&
- (dwDSPAddr >= ul_trace_sec_beg))
+ if ((dsp_addr <= ul_trace_sec_end) &&
+ (dsp_addr >= ul_trace_sec_beg))
trace_read = true;
}
if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
status = -EPERM;
- offset = dwDSPAddr - ul_ext_base;
+ offset = dsp_addr - ul_ext_base;
if (DSP_SUCCEEDED(status))
memcpy(pbHostBuf, (u8 *) dw_base_addr + offset, ul_num_bytes);
* Copies buffers to the DSP internal/external memory.
*/
int write_dsp_data(struct bridge_dev_context *hDevContext,
- IN u8 *pbHostBuf, u32 dwDSPAddr, u32 ul_num_bytes,
+ IN u8 *pbHostBuf, u32 dsp_addr, u32 ul_num_bytes,
u32 ulMemType)
{
u32 offset;
if (!resources)
return -EPERM;
- offset = dwDSPAddr - hDevContext->dw_dsp_start_add;
+ offset = dsp_addr - hDevContext->dw_dsp_start_add;
if (offset < base1) {
dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[2],
resources->dw_mem_length[2]);
*
*/
int write_ext_dsp_data(struct bridge_dev_context *dev_context,
- IN u8 *pbHostBuf, u32 dwDSPAddr,
+ IN u8 *pbHostBuf, u32 dsp_addr,
u32 ul_num_bytes, u32 ulMemType,
bool dynamic_load)
{
&ul_trace_sec_end);
}
if (DSP_SUCCEEDED(ret)) {
- if ((dwDSPAddr <= ul_trace_sec_end) &&
- (dwDSPAddr >= ul_trace_sec_beg))
+ if ((dsp_addr <= ul_trace_sec_end) &&
+ (dsp_addr >= ul_trace_sec_beg))
trace_load = true;
}
for (i = 0; i < 4; i++)
remain_byte[i] = 0x0;
- dw_offset = dwDSPAddr - ul_ext_base;
- /* Also make sure the dwDSPAddr is < ul_ext_end */
- if (dwDSPAddr > ul_ext_end || dw_offset > dwDSPAddr)
+ dw_offset = dsp_addr - ul_ext_base;
+ /* Also make sure the dsp_addr is < ul_ext_end */
+ if (dsp_addr > ul_ext_end || dw_offset > dsp_addr)
ret = -EPERM;
}
if (DSP_SUCCEEDED(ret)) {
* is configured by the combination of DSP MMU and shm Memory manager in the CDB
*/
extern int read_ext_dsp_data(struct bridge_dev_context *dev_context,
- OUT u8 *pbHostBuf, u32 dwDSPAddr,
+ OUT u8 *pbHostBuf, u32 dsp_addr,
u32 ul_num_bytes, u32 ulMemType);
/*
* ======== write_dsp_data ========
*/
extern int write_dsp_data(struct bridge_dev_context *dev_context,
- OUT u8 *pbHostBuf, u32 dwDSPAddr,
+ OUT u8 *pbHostBuf, u32 dsp_addr,
u32 ul_num_bytes, u32 ulMemType);
/*
* shm Memory manager in the CDB
*/
extern int write_ext_dsp_data(struct bridge_dev_context *dev_context,
- IN u8 *pbHostBuf, u32 dwDSPAddr,
+ IN u8 *pbHostBuf, u32 dsp_addr,
u32 ul_num_bytes, u32 ulMemType,
bool dynamic_load);
*/
extern inline void write_ext32_bit_dsp_data(IN const
struct bridge_dev_context *dev_context,
- IN u32 dwDSPAddr, IN u32 val)
+ IN u32 dsp_addr, IN u32 val)
{
- *(u32 *) dwDSPAddr = ((dev_context->tc_word_swap_on) ? (((val << 16) &
+ *(u32 *) dsp_addr = ((dev_context->tc_word_swap_on) ? (((val << 16) &
0xFFFF0000) |
((val >> 16) &
0x0000FFFF)) :
* Reads 32 bit data from the external memory
*/
extern inline u32 read_ext32_bit_dsp_data(IN const struct bridge_dev_context
- *dev_context, IN u32 dwDSPAddr)
+ *dev_context, IN u32 dsp_addr)
{
u32 ret;
- ret = *(u32 *) dwDSPAddr;
+ ret = *(u32 *) dsp_addr;
ret = ((dev_context->tc_word_swap_on) ? (((ret << 16)
& 0xFFFF0000) | ((ret >> 16) &
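/*
 * Standalone sketch (not driver code): the tc_word_swap_on expressions above
 * just exchange the two 16-bit halves of a 32-bit value on its way to or from
 * external DSP memory. swap_enabled stands in for dev_context->tc_word_swap_on.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t maybe_word_swap(uint32_t val, int swap_enabled)
{
	if (!swap_enabled)
		return val;
	return ((val << 16) & 0xFFFF0000u) |    /* low half moves up */
	       ((val >> 16) & 0x0000FFFFu);     /* high half moves down */
}

int main(void)
{
	/* prints 0x1234ABCD -> 0xABCD1234 */
	printf("0x%08X -> 0x%08X\n", 0x1234ABCDu,
	       (unsigned)maybe_word_swap(0x1234ABCDu, 1));
	return 0;
}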
u32 physicalAddr,
u32 virtualAddr,
u32 pageSize,
- u32 entryNum,
+ u32 entry_num,
struct hw_mmu_map_attrs_t *map_attrs,
s8 preservedBit, s8 validBit)
{
/* Update the MMU Lock Register */
/* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
- MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entryNum);
+ MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);
/* Enable loading of an entry in TLB by writing 1
into LD_TLB_REG register */
u32 physicalAddr,
u32 virtualAddr,
u32 pageSize,
- u32 entryNum,
+ u32 entry_num,
struct hw_mmu_map_attrs_t *map_attrs,
s8 preservedBit, s8 validBit);
* Store the Device Object handle for a given devnode.
* Parameters:
* dev_node_obj: Platform's dev_node handle we are storing value with.
- * dwValue: Arbitrary value to store.
+ * value: Arbitrary value to store.
* Returns:
* 0: Success.
* -EFAULT: dev_node_obj is invalid.
* 0: The Private u32 was successfully set.
*/
extern int cfg_set_dev_object(IN struct cfg_devnode *dev_node_obj,
- IN u32 dwValue);
+ IN u32 value);
/*
* ======== CFG_SetDrvObject ========
* Purpose:
* Store the Driver Object handle.
* Parameters:
- * dwValue: Arbitrary value to store.
+ * value: Arbitrary value to store.
* dw_type Type of Object to Store
* Returns:
* 0: Success.
* Ensures:
* 0: The Private u32 was successfully set.
*/
-extern int cfg_set_object(IN u32 dwValue, u8 dw_type);
+extern int cfg_set_object(IN u32 value, u8 dw_type);
#endif /* CFG_ */
* hcmm_mgr: Handle to a Cmm Mgr.
* lpGPPBasePA: GPP Base Physical address.
* ul_size: Size in GPP bytes.
- * dwDSPAddrOffset GPP PA to DSP PA Offset.
+ * dsp_addr_offset: GPP PA to DSP PA Offset.
* c_factor: Add offset if CMM_ADDTODSPPA, sub if CMM_SUBFROMDSPPA.
* dw_dsp_base: DSP virtual base byte address.
* ul_dsp_size: Size of DSP segment in bytes.
extern int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
unsigned int dw_gpp_base_pa,
u32 ul_size,
- u32 dwDSPAddrOffset,
+ u32 dsp_addr_offset,
s8 c_factor,
unsigned int dw_dsp_base,
u32 ul_dsp_size,
- u32 *pulSegId, u32 dwGPPBaseBA);
+ u32 *pulSegId, u32 gpp_base_ba);
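/*
 * Standalone sketch (assumptions noted): per the parameter description above,
 * dsp_addr_offset and c_factor describe a fixed GPP-PA <-> DSP-PA translation
 * for the registered segment - the offset is added for CMM_ADDTODSPPA and
 * subtracted for CMM_SUBFROMDSPPA. Treating the two factors as +1/-1 macros
 * here is an illustrative assumption, not the kernel definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define ADD_TO_DSP_PA    1     /* stand-in for CMM_ADDTODSPPA */
#define SUB_FROM_DSP_PA -1     /* stand-in for CMM_SUBFROMDSPPA */

static uint32_t gpp_pa_to_dsp_pa(uint32_t gpp_pa, uint32_t offset, int c_factor)
{
	/* Add the offset for one direction of the mapping, subtract for the other. */
	return (c_factor == ADD_TO_DSP_PA) ? gpp_pa + offset : gpp_pa - offset;
}

int main(void)
{
	uint32_t dsp_pa = gpp_pa_to_dsp_pa(0x87000000u, 0x07000000u, SUB_FROM_DSP_PA);

	printf("DSP PA = 0x%08X\n", (unsigned)dsp_pa);   /* 0x80000000 */
	return 0;
}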
/*
* ======== cmm_un_register_gppsm_seg ========
* ======== dbl_alloc_fxn ========
* Allocate memory function. Allocate or reserve (if reserved == TRUE)
* "size" bytes of memory from segment "space" and return the address in
- * *dspAddr (or starting at *dspAddr if reserve == TRUE). Returns 0 on
+ * *dsp_address (or starting at *dsp_address if reserve == TRUE). Returns 0 on
* success, or an error code on failure.
*/
typedef s32(*dbl_alloc_fxn) (void *hdl, s32 space, u32 size, u32 align,
- u32 *dspAddr, s32 seg_id, s32 req, bool reserved);
+ u32 *dsp_address, s32 seg_id, s32 req,
+ bool reserved);
/*
* ======== dbl_free_fxn ========
/*
* ======== dbl_write_fxn ========
* Write memory function. Write "n" HOST bytes of memory to segment "mtype"
- * starting at address "dspAddr" from the buffer "buf". The buffer is
+ * starting at address "dsp_address" from the buffer "buf". The buffer is
* formatted as an array of words appropriate for the DSP.
*/
-typedef s32(*dbl_write_fxn) (void *hdl, u32 dspAddr, void *buf,
+typedef s32(*dbl_write_fxn) (void *hdl, u32 dsp_address, void *buf,
u32 n, s32 mtype);
/*
* ======== dbll_alloc_fxn ========
* Allocate memory function. Allocate or reserve (if reserved == TRUE)
* "size" bytes of memory from segment "space" and return the address in
- * *dspAddr (or starting at *dspAddr if reserve == TRUE). Returns 0 on
+ * *dsp_address (or starting at *dsp_address if reserve == TRUE). Returns 0 on
* success, or an error code on failure.
*/
typedef s32(*dbll_alloc_fxn) (void *hdl, s32 space, u32 size, u32 align,
- u32 *dspAddr, s32 seg_id, s32 req,
+ u32 *dsp_address, s32 seg_id, s32 req,
bool reserved);
/*
/*
* ======== dbll_write_fxn ========
* Write memory function. Write "n" HOST bytes of memory to segment "mtype"
- * starting at address "dspAddr" from the buffer "buf". The buffer is
+ * starting at address "dsp_address" from the buffer "buf". The buffer is
* formatted as an array of words appropriate for the DSP.
*/
-typedef s32(*dbll_write_fxn) (void *hdl, u32 dspAddr, void *buf,
+typedef s32(*dbll_write_fxn) (void *hdl, u32 dsp_address, void *buf,
u32 n, s32 mtype);
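/*
 * Standalone sketch (not the loader itself): dbl_write_fxn/dbll_write_fxn are
 * caller-supplied callbacks the loader invokes to push section data to the
 * target; fake_ovly_write() later in this patch is the degenerate case that
 * just reports the byte count. The stdint-based typedef and log_write() below
 * are illustrative stand-ins for the u32/s32 kernel types.
 */
#include <stdint.h>
#include <stdio.h>

typedef int32_t (*write_fxn)(void *hdl, uint32_t dsp_address, void *buf,
			     uint32_t n, int32_t mtype);

/* A write callback that only records what would be written. */
static int32_t log_write(void *hdl, uint32_t dsp_address, void *buf,
			 uint32_t n, int32_t mtype)
{
	(void)hdl; (void)buf; (void)mtype;
	printf("would write %u bytes at 0x%08X\n", (unsigned)n,
	       (unsigned)dsp_address);
	return (int32_t)n;             /* caller treats the return as bytes done */
}

int main(void)
{
	write_fxn write = log_write;   /* loader stores and later invokes this */
	uint8_t image[64] = { 0 };

	return write(NULL, 0x10800000u, image, sizeof(image), 0) == 64 ? 0 : 1;
}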
/*
* Parameters:
* pArb: Handle to a Device Object.
* hDevContext: Handle to Bridge driver defined device info.
- * dwDSPAddr: Address on DSP board (Destination).
+ * dsp_addr: Address on DSP board (Destination).
* pHostBuf: Pointer to host buffer (Source).
* ul_num_bytes: Number of bytes to transfer.
* ulMemType: Memory space on DSP to which to transfer.
OPTIONAL u32 dw_dsp_addr, u32 dw_arg);
extern int bridge_chnl_get_ioc(struct chnl_object *chnl_obj,
- u32 dwTimeOut, OUT struct chnl_ioc *pIOC);
+ u32 timeout, OUT struct chnl_ioc *pIOC);
extern int bridge_chnl_cancel_io(struct chnl_object *chnl_obj);
extern int bridge_chnl_flush_io(struct chnl_object *chnl_obj,
- u32 dwTimeOut);
+ u32 timeout);
extern int bridge_chnl_get_info(struct chnl_object *chnl_obj,
OUT struct chnl_info *pInfo);
*pMgrInfo);
extern int bridge_chnl_idle(struct chnl_object *chnl_obj,
- u32 dwTimeOut, bool fFlush);
+ u32 timeout, bool flush_data);
extern int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
u32 event_mask,
* Bring board to the BRD_RUNNING (start) state.
* Parameters:
* hDevContext: Handle to Bridge driver defined device context.
- * dwDSPAddr: DSP address at which to start execution.
+ * dsp_addr: DSP address at which to start execution.
* Returns:
* 0: Success.
* -ETIMEDOUT: Timeout occurred waiting for a response from hardware.
* else: Board state is indeterminate.
*/
typedef int(*fxn_brd_start) (struct bridge_dev_context
- * hDevContext, u32 dwDSPAddr);
+ * hDevContext, u32 dsp_addr);
/*
* ======== bridge_brd_mem_copy ========
* space. Unlike bridge_brd_write, this API does reset the DSP
* Parameters:
* hDevContext: Handle to Bridge driver defined device info.
- * dwDSPAddr: Address on DSP board (Destination).
+ * dsp_addr: Address on DSP board (Destination).
* pHostBuf: Pointer to host buffer (Source).
* ul_num_bytes: Number of bytes to transfer.
* ulMemType: Memory space on DSP to which to transfer.
typedef int(*fxn_brd_memwrite) (struct bridge_dev_context
* hDevContext,
IN u8 *pHostBuf,
- u32 dwDSPAddr, u32 ul_num_bytes,
+ u32 dsp_addr, u32 ul_num_bytes,
u32 ulMemType);
/*
* Parameters:
* hDevContext: Handle to Bridge driver defined device info.
* pHostBuf: Pointer to host buffer (Destination).
- * dwDSPAddr: Address on DSP board (Source).
+ * dsp_addr: Address on DSP board (Source).
* ul_num_bytes: Number of bytes to transfer.
* ulMemType: Memory space on DSP from which to transfer.
* Returns:
*/
typedef int(*fxn_brd_read) (struct bridge_dev_context *hDevContext,
OUT u8 *pHostBuf,
- u32 dwDSPAddr,
+ u32 dsp_addr,
u32 ul_num_bytes, u32 ulMemType);
/*
* space.
* Parameters:
* hDevContext: Handle to Bridge driver defined device info.
- * dwDSPAddr: Address on DSP board (Destination).
+ * dsp_addr: Address on DSP board (Destination).
* pHostBuf: Pointer to host buffer (Source).
* ul_num_bytes: Number of bytes to transfer.
* ulMemType: Memory space on DSP to which to transfer.
*/
typedef int(*fxn_brd_write) (struct bridge_dev_context *hDevContext,
IN u8 *pHostBuf,
- u32 dwDSPAddr,
+ u32 dsp_addr,
u32 ul_num_bytes, u32 ulMemType);
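/*
 * Standalone sketch (not the real bridge_drv_interface): these fxn_brd_*
 * typedefs are slots in a function-pointer table that each Bridge driver
 * fills in, and upper layers touch the hardware only through that table -
 * the same pattern the dispatcher uses later via intf_fxns->pfn_chnl_get_ioc.
 * The two-entry table and dummy ops below are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct brd_ops {                /* minimal stand-in for the interface table */
	int (*brd_read)(uint8_t *host_buf, uint32_t dsp_addr, uint32_t nbytes);
	int (*brd_write)(const uint8_t *host_buf, uint32_t dsp_addr, uint32_t nbytes);
};

static uint8_t fake_dsp_mem[256];       /* pretend DSP memory window */

static int dummy_read(uint8_t *host_buf, uint32_t dsp_addr, uint32_t nbytes)
{
	memcpy(host_buf, fake_dsp_mem + dsp_addr, nbytes);
	return 0;
}

static int dummy_write(const uint8_t *host_buf, uint32_t dsp_addr, uint32_t nbytes)
{
	memcpy(fake_dsp_mem + dsp_addr, host_buf, nbytes);
	return 0;
}

static const struct brd_ops ops = { dummy_read, dummy_write };

int main(void)
{
	uint8_t out[4], in[4] = { 1, 2, 3, 4 };

	(*ops.brd_write)(in, 16, sizeof(in));    /* dispatch through the table */
	(*ops.brd_read)(out, 16, sizeof(out));
	printf("round trip: %d %d %d %d\n", out[0], out[1], out[2], out[3]);
	return 0;
}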
/*
* Parameters:
* hdeh_mgr: Handle to DEH manager object.
* ulEventMask: Indicate the type of exception
- * dwErrInfo: Error information
+ * error_info: Error information
* Returns:
*
* Requires:
* Ensures:
*/
typedef void (*fxn_deh_notify) (struct deh_mgr *hdeh_mgr,
- u32 ulEventMask, u32 dwErrInfo);
+ u32 ulEventMask, u32 error_info);
/*
* ======== bridge_chnl_open ========
* completed I/O request.
* Parameters:
* chnl_obj: Channel object handle.
- * dwTimeOut: A value of CHNL_IOCNOWAIT will simply dequeue the
+ * timeout: A value of CHNL_IOCNOWAIT will simply dequeue the
* first available IOC.
* pIOC: On output, contains host buffer address, bytes
* transferred, and status of I/O completion.
* Returns:
* 0: Success.
* -EFAULT: Invalid chnl_obj or pIOC.
- * -EREMOTEIO: CHNL_IOCNOWAIT was specified as the dwTimeOut parameter
+ * -EREMOTEIO: CHNL_IOCNOWAIT was specified as the timeout parameter
* yet no I/O completions were queued.
* Requires:
- * dwTimeOut == CHNL_IOCNOWAIT.
+ * timeout == CHNL_IOCNOWAIT.
* Ensures:
* 0: if there are any remaining IOC's queued before this call
* returns, the channel event object will be left in a signalled
* state.
*/
typedef int(*fxn_chnl_getioc) (struct chnl_object *chnl_obj,
- u32 dwTimeOut,
+ u32 timeout,
OUT struct chnl_ioc *pIOC);
/*
* cancel all pending IO requests.
* Parameters:
* chnl_obj: Channel object handle.
- * dwTimeOut: Timeout value for flush operation.
+ * timeout: Timeout value for flush operation.
* Returns:
* 0: Success;
* S_CHNLIOREQUEST: Returned if any IORequests are in the output queue.
* 0: No I/O requests will be pending on this channel.
*/
typedef int(*fxn_chnl_flushio) (struct chnl_object *chnl_obj,
- u32 dwTimeOut);
+ u32 timeout);
/*
* ======== bridge_chnl_get_info ========
* ======== bridge_chnl_idle ========
* Purpose:
* Idle a channel. If this is an input channel, or if this is an output
- * channel and fFlush is TRUE, all currently enqueued buffers will be
+ * channel and flush_data is TRUE, all currently enqueued buffers will be
* dequeued (data discarded for output channel).
- * If this is an output channel and fFlush is FALSE, this function
+ * If this is an output channel and flush_data is FALSE, this function
* will block until all currently buffered data is output, or the timeout
* specified has been reached.
*
* Parameters:
* chnl_obj: Channel object handle.
- * dwTimeOut: If output channel and fFlush is FALSE, timeout value
+ * timeout: If output channel and flush_data is FALSE, timeout value
* to wait for buffers to be output. (Not used for
* input channel).
- * fFlush: If output channel and fFlush is TRUE, discard any
+ * flush_data: If output channel and flush_data is TRUE, discard any
* currently buffered data. If FALSE, wait for currently
* buffered data to be output, or timeout, whichever
- * occurs first. fFlush is ignored for input channel.
+ * occurs first. flush_data is ignored for input channel.
* Returns:
* 0: Success;
* -EFAULT: Invalid chnl_obj.
* Ensures:
*/
typedef int(*fxn_chnl_idle) (struct chnl_object *chnl_obj,
- u32 dwTimeOut, bool fFlush);
+ u32 timeout, bool flush_data);
/*
* ======== bridge_chnl_register_notify ========
* This function is called by Device Manager to de-initialize a device.
* This function is not called by applications.
* Parameters:
- * dwDeviceContext:Handle to the device context. The XXX_Init function
+ * device_context: Handle to the device context. The XXX_Init function
* creates and returns this identifier.
* Returns:
* TRUE indicates the device successfully de-initialized. Otherwise it
* returns FALSE.
* Requires:
- * dwDeviceContext!= NULL. For a built in device this should never
+ * device_context != NULL. For a built-in device this should never
* get called.
* Ensures:
*/
-extern bool dsp_deinit(u32 dwDeviceContext);
+extern bool dsp_deinit(u32 device_context);
/*
* ======== dsp_init ========
/* Maximum channel bufsize that can be used. */
extern u32 io_buf_size(struct io_mgr *hio_mgr);
-extern u32 io_read_value(struct bridge_dev_context *hDevContext, u32 dwDSPAddr);
+extern u32 io_read_value(struct bridge_dev_context *hDevContext, u32 dsp_addr);
extern void io_write_value(struct bridge_dev_context *hDevContext,
- u32 dwDSPAddr, u32 dwValue);
+ u32 dsp_addr, u32 value);
extern u32 io_read_value_long(struct bridge_dev_context *hDevContext,
- u32 dwDSPAddr);
+ u32 dsp_addr);
extern void io_write_value_long(struct bridge_dev_context *hDevContext,
- u32 dwDSPAddr, u32 dwValue);
+ u32 dsp_addr, u32 value);
extern void io_or_set_value(struct bridge_dev_context *hDevContext,
- u32 dwDSPAddr, u32 dwValue);
+ u32 dsp_addr, u32 value);
extern void io_and_set_value(struct bridge_dev_context *hDevContext,
- u32 dwDSPAddr, u32 dwValue);
+ u32 dsp_addr, u32 value);
extern void io_intr_dsp2(IN struct io_mgr *pio_mgr, IN u16 mb_val);
* segid - Memory segment to allocate from.
* size - Size (target MAUS) to allocate.
* align - alignment.
- * dspAddr - If reserve is FALSE, the location to store allocated
+ * dsp_address - If reserve is FALSE, the location to store allocated
* address on output, otherwise, the DSP address to
* reserve.
- * reserve - If TRUE, reserve the memory specified by dspAddr.
+ * reserve - If TRUE, reserve the memory specified by dsp_address.
* Returns:
* 0: Success.
* -ENOMEM: Memory allocation on GPP failed.
* Requires:
* RMM initialized.
* Valid target.
- * dspAddr != NULL.
+ * dsp_address != NULL.
* size > 0
* reserve || target->num_segs > 0.
* Ensures:
*/
extern int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
- u32 align, u32 *dspAdr, bool reserve);
+ u32 align, u32 *dsp_address, bool reserve);
/*
* ======== rmm_create ========
* Parameters:
* target: - Target returned from rmm_create().
* segid: - Segment of memory to free.
- * dspAddr: - Address to free or unreserve.
+ * dsp_address: - Address to free or unreserve.
* size: - Size of memory to free or unreserve.
* reserved: - TRUE if memory was reserved only, otherwise FALSE.
* Returns:
* RMM initialized.
* Valid target.
* reserved || segid < target->num_segs.
- * reserve || [dspAddr, dspAddr + size] is a valid memory range.
+ * reserve || [dsp_address, dsp_address + size] is a valid memory range.
* Ensures:
*/
-extern bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dspAddr,
+extern bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_address,
u32 size, bool reserved);
/*
* ======== strm_idle ========
* Purpose:
* Idle a stream and optionally flush output data buffers.
- * If this is an output stream and fFlush is TRUE, all data currently
+ * If this is an output stream and flush_data is TRUE, all data currently
* enqueued will be discarded.
- * If this is an output stream and fFlush is FALSE, this function
+ * If this is an output stream and flush_data is FALSE, this function
* will block until all currently buffered data is output, or the timeout
* specified has been reached.
* After a successful call to strm_idle(), all buffers can immediately
* be reclaimed.
* Parameters:
* hStrm: Stream handle returned from strm_open().
- * fFlush: If TRUE, discard output buffers.
+ * flush_data: If TRUE, discard output buffers.
* Returns:
* 0: Success.
* -EFAULT: Invalid hStrm.
* strm_init(void) called.
* Ensures:
*/
-extern int strm_idle(struct strm_object *hStrm, bool fFlush);
+extern int strm_idle(struct strm_object *hStrm, bool flush_data);
/*
* ======== strm_init ========
*/
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
u32 dw_gpp_base_pa, u32 ul_size,
- u32 dwDSPAddrOffset, s8 c_factor,
+ u32 dsp_addr_offset, s8 c_factor,
u32 dw_dsp_base, u32 ul_dsp_size,
u32 *pulSegId, u32 dw_gpp_base_va)
{
DBC_REQUIRE(dw_gpp_base_va != 0);
DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
(c_factor >= CMM_SUBFROMDSPPA));
- dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dwDSPAddrOffset %x "
+ dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
"dw_dsp_base %x ul_dsp_size %x dw_gpp_base_va %x\n", __func__,
- dw_gpp_base_pa, ul_size, dwDSPAddrOffset, dw_dsp_base,
+ dw_gpp_base_pa, ul_size, dsp_addr_offset, dw_dsp_base,
ul_dsp_size, dw_gpp_base_va);
if (!hcmm_mgr) {
status = -EFAULT;
psma->shm_base = dw_gpp_base_pa; /* SM Base phys */
psma->ul_sm_size = ul_size; /* SM segment size in bytes */
psma->dw_vm_base = dw_gpp_base_va;
- psma->dw_dsp_phys_addr_offset = dwDSPAddrOffset;
+ psma->dw_dsp_phys_addr_offset = dsp_addr_offset;
psma->c_factor = c_factor;
psma->dw_dsp_base = dw_dsp_base;
psma->ul_dsp_size = ul_dsp_size;
static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
struct node_strmdef strm_def, u32 max,
u32 chars_in_rms_word);
-static int send_message(struct disp_object *disp_obj, u32 dwTimeout,
+static int send_message(struct disp_object *disp_obj, u32 timeout,
u32 ul_bytes, OUT u32 *pdw_arg);
/*
* ======== send_message ======
* Send command message to RMS, get reply from RMS.
*/
-static int send_message(struct disp_object *disp_obj, u32 dwTimeout,
+static int send_message(struct disp_object *disp_obj, u32 timeout,
u32 ul_bytes, u32 *pdw_arg)
{
struct bridge_drv_interface *intf_fxns;
goto func_end;
status =
- (*intf_fxns->pfn_chnl_get_ioc) (chnl_obj, dwTimeout, &chnl_ioc_obj);
+ (*intf_fxns->pfn_chnl_get_ioc) (chnl_obj, timeout, &chnl_ioc_obj);
if (DSP_SUCCEEDED(status)) {
if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
if (CHNL_IS_TIMED_OUT(chnl_ioc_obj))
goto func_end;
status =
- (*intf_fxns->pfn_chnl_get_ioc) (chnl_obj, dwTimeout, &chnl_ioc_obj);
+ (*intf_fxns->pfn_chnl_get_ioc) (chnl_obj, timeout, &chnl_ioc_obj);
if (DSP_SUCCEEDED(status)) {
if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) {
status = -ETIME;
struct ovly_sect **pList,
struct dbll_sect_info *pSectInfo,
bool *pExists, u32 addr, u32 bytes);
-static s32 fake_ovly_write(void *handle, u32 dspAddr, void *buf, u32 bytes,
+static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
s32 mtype);
static void free_sects(struct nldr_object *nldr_obj,
struct ovly_sect *phase_sects, u16 alloc_num);
static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
enum nldr_phase phase);
static int remote_alloc(void **pRef, u16 mem_sect_type, u32 size,
- u32 align, u32 *dspAddr, OPTIONAL s32 segmentId,
+ u32 align, u32 *dsp_address,
+ OPTIONAL s32 segmentId,
OPTIONAL s32 req, bool reserve);
-static int remote_free(void **pRef, u16 space, u32 dspAddr, u32 size,
+static int remote_free(void **pRef, u16 space, u32 dsp_address, u32 size,
bool reserve);
static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
/*
* ======== fake_ovly_write ========
*/
-static s32 fake_ovly_write(void *handle, u32 dspAddr, void *buf, u32 bytes,
+static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
s32 mtype)
{
return (s32) bytes;
* ======== remote_alloc ========
*/
static int remote_alloc(void **pRef, u16 space, u32 size,
- u32 align, u32 *dspAddr,
+ u32 align, u32 *dsp_address,
OPTIONAL s32 segmentId, OPTIONAL s32 req,
bool reserve)
{
u16 i;
u16 mem_sect_type;
u32 word_size;
- struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dspAddr;
+ struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address;
bool mem_load_req = false;
int status = -ENOMEM; /* Set to fail */
DBC_REQUIRE(hnode);
/* Attempt to allocate from segid first. */
rmm_addr_obj->segid = segid;
status =
- rmm_alloc(rmm, segid, word_size, align, dspAddr, false);
+ rmm_alloc(rmm, segid, word_size, align, dsp_address, false);
if (DSP_FAILED(status)) {
dev_dbg(bridge, "%s: Unable allocate from segment %d\n",
__func__, segid);
mem_sect_type)
continue;
- status = rmm_alloc(rmm, i, word_size, align, dspAddr,
- false);
+ status = rmm_alloc(rmm, i, word_size, align,
+ dsp_address, false);
if (DSP_SUCCEEDED(status)) {
/* Save segid for freeing later */
rmm_addr_obj->segid = i;
mem_sect_type)
continue;
- status = rmm_alloc(rmm, i, word_size, align, dspAddr,
- false);
+ status = rmm_alloc(rmm, i, word_size, align,
+ dsp_address, false);
if (DSP_SUCCEEDED(status)) {
/* Save segid */
rmm_addr_obj->segid = i;
return status;
}
-static int remote_free(void **pRef, u16 space, u32 dspAddr,
+static int remote_free(void **pRef, u16 space, u32 dsp_address,
u32 size, bool reserve)
{
struct nldr_object *nldr_obj = (struct nldr_object *)pRef;
(size + nldr_obj->us_dsp_word_size -
1) / nldr_obj->us_dsp_word_size;
- if (rmm_free(rmm, space, dspAddr, word_size, reserve))
+ if (rmm_free(rmm, space, dsp_address, word_size, reserve))
status = 0;
return status;
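/*
 * Standalone sketch: remote_alloc()/remote_free() convert byte counts to DSP
 * words with the usual round-up division, so a request that is not a whole
 * number of words still covers its last partial word. The word size of 2 in
 * the example is only illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t bytes_to_dsp_words(uint32_t size, uint32_t dsp_word_size)
{
	return (size + dsp_word_size - 1) / dsp_word_size;   /* ceiling division */
}

int main(void)
{
	printf("%u\n", (unsigned)bytes_to_dsp_words(5, 2));  /* 3 words, not 2 */
	return 0;
}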
static int get_exec_file(struct cfg_devnode *dev_node_obj,
struct dev_object *hdev_obj,
- u32 size, char *execFile)
+ u32 size, char *exec_file)
{
u8 dev_type;
s32 len;
dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
if (dev_type == DSP_UNIT) {
- return cfg_get_exec_file(dev_node_obj, size, execFile);
+ return cfg_get_exec_file(dev_node_obj, size, exec_file);
} else if (dev_type == IVA_UNIT) {
if (iva_img) {
len = strlen(iva_img);
- strncpy(execFile, iva_img, len + 1);
+ strncpy(exec_file, iva_img, len + 1);
return 0;
}
}
static u32 refs; /* module reference count */
static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
- u32 align, u32 *dspAddr);
+ u32 align, u32 *dsp_address);
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
u32 size);
* ======== rmm_alloc ========
*/
int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
- u32 align, u32 *dspAddr, bool reserve)
+ u32 align, u32 *dsp_address, bool reserve)
{
struct rmm_ovly_sect *sect;
struct rmm_ovly_sect *prev_sect = NULL;
int status = 0;
DBC_REQUIRE(target);
- DBC_REQUIRE(dspAddr != NULL);
+ DBC_REQUIRE(dsp_address != NULL);
DBC_REQUIRE(size > 0);
DBC_REQUIRE(reserve || (target->num_segs > 0));
DBC_REQUIRE(refs > 0);
if (!reserve) {
- if (!alloc_block(target, segid, size, align, dspAddr)) {
+ if (!alloc_block(target, segid, size, align, dsp_address)) {
status = -ENOMEM;
} else {
/* Increment the number of allocated blocks in this
}
/* An overlay section - See if block is already in use. If not,
* insert into the list in ascending address size. */
- addr = *dspAddr;
+ addr = *dsp_address;
sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
/* Find place to insert new list element. List is sorted from
* smallest to largest address. */
* first.
*/
static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
- u32 align, u32 *dspAddr)
+ u32 align, u32 *dsp_address)
{
struct rmm_header *head;
struct rmm_header *prevhead = NULL;
if (tmpalign)
free_block(target, segid, addr, tmpalign);
- *dspAddr = addr + tmpalign;
+ *dsp_address = addr + tmpalign;
return true;
}
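/*
 * Standalone sketch (not the rmm allocator): alloc_block() above satisfies an
 * alignment request by giving the first tmpalign bytes of a free block back
 * to the pool and returning the address rounded up to the next alignment
 * boundary. align_up() shows just that rounding; the names and values below
 * are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t align_up(uint32_t addr, uint32_t align)
{
	uint32_t rem = addr % align;            /* bytes past the last boundary */

	return rem ? addr + (align - rem) : addr;
}

int main(void)
{
	uint32_t addr = 0x1003u, align = 0x10u;
	uint32_t aligned = align_up(addr, align);

	/* The 13-byte gap is what alloc_block() hands back via free_block(). */
	printf("0x%X -> 0x%X (gap %u bytes)\n", (unsigned)addr,
	       (unsigned)aligned, (unsigned)(aligned - addr));
	return 0;
}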
* Purpose:
* Idles a particular stream.
*/
-int strm_idle(struct strm_object *hStrm, bool fFlush)
+int strm_idle(struct strm_object *hStrm, bool flush_data)
{
struct bridge_drv_interface *intf_fxns;
int status = 0;
intf_fxns = hStrm->strm_mgr_obj->intf_fxns;
status = (*intf_fxns->pfn_chnl_idle) (hStrm->chnl_obj,
- hStrm->utimeout, fFlush);
+ hStrm->utimeout,
+ flush_data);
}
- dev_dbg(bridge, "%s: hStrm: %p fFlush: 0x%x status: 0x%x\n",
- __func__, hStrm, fFlush, status);
+ dev_dbg(bridge, "%s: hStrm: %p flush_data: 0x%x status: 0x%x\n",
+ __func__, hStrm, flush_data, status);
return status;
}
* Purpose:
* Store the Device Object handle and dev_node pointer for a given devnode.
*/
-int cfg_set_dev_object(struct cfg_devnode *dev_node_obj, u32 dwValue)
+int cfg_set_dev_object(struct cfg_devnode *dev_node_obj, u32 value)
{
int status = 0;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
/* Store the Bridge device object in the Registry */
if (!(strcmp((char *)dev_node_obj, "TIOMAP1510")))
- drv_datap->dev_object = (void *) dwValue;
+ drv_datap->dev_object = (void *) value;
}
if (DSP_FAILED(status))
pr_err("%s: Failed, status 0x%x\n", __func__, status);
* Purpose:
* Store the Driver Object handle
*/
-int cfg_set_object(u32 dwValue, u8 dw_type)
+int cfg_set_object(u32 value, u8 dw_type)
{
int status = -EINVAL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
switch (dw_type) {
case (REG_DRV_OBJECT):
- drv_datap->drv_object = (void *)dwValue;
+ drv_datap->drv_object = (void *)value;
status = 0;
break;
case (REG_MGR_OBJECT):
- drv_datap->mgr_object = (void *)dwValue;
+ drv_datap->mgr_object = (void *)value;
status = 0;
break;
default: