u32 ul_trace_buffer_begin; /* Trace message start address */
u32 ul_trace_buffer_end; /* Trace message end address */
u32 ul_trace_buffer_current; /* Trace message current address */
- u32 ul_gpp_read_pointer; /* GPP Read pointer to Trace buffer */
+ u32 gpp_read_pointer; /* GPP Read pointer to Trace buffer */
u8 *pmsg;
- u32 ul_gpp_va;
+ u32 gpp_va;
u32 dsp_va;
#endif
/* IO Dpc */
* This is the virtual uncached ioremapped
* address!!!
*/
- ae_proc[ndx].ul_gpp_va = gpp_va_curr;
+ ae_proc[ndx].gpp_va = gpp_va_curr;
ae_proc[ndx].dsp_va =
va_curr / hio_mgr->word_size;
ae_proc[ndx].ul_size = page_size[i];
dev_dbg(bridge, "shm MMU TLB entry PA %x"
" VA %x DSP_VA %x Size %x\n",
ae_proc[ndx].gpp_pa,
- ae_proc[ndx].ul_gpp_va,
+ ae_proc[ndx].gpp_va,
ae_proc[ndx].dsp_va *
hio_mgr->word_size, page_size[i]);
ndx++;
"shm MMU PTE entry PA %x"
" VA %x DSP_VA %x Size %x\n",
ae_proc[ndx].gpp_pa,
- ae_proc[ndx].ul_gpp_va,
+ ae_proc[ndx].gpp_va,
ae_proc[ndx].dsp_va *
hio_mgr->word_size, page_size[i]);
if (status)
* should not conflict with shm entries on MPU or DSP side.
*/
for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
- if (hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys == 0)
+ if (hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys == 0)
continue;
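+		/*
+		 * Flag a conflict if the CDB entry's GPP physical address
+		 * falls within 1 MB below the shm base or inside the shm
+		 * segment itself; the DSP-side check applies the same
+		 * guard band, with 0x100000 / word_size being that 1 MB
+		 * expressed in DSP words.
+		 */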
- if ((hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys >
+ if ((hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys >
ul_gpp_pa - 0x100000
- && hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys <=
+ && hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys <=
ul_gpp_pa + ul_seg_size)
|| (hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt >
ul_dsp_va - 0x100000 / hio_mgr->word_size
"CDB MMU entry %d conflicts with "
"shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
"GppPa %x, DspVa %x, Bytes %x.\n", i,
- hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys,
+ hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys,
hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt,
ul_gpp_pa, ul_dsp_va, ul_seg_size);
status = -EPERM;
dsp_virt;
ae_proc[ndx].gpp_pa =
hio_mgr->ext_proc_info.ty_tlb[i].
- ul_gpp_phys;
- ae_proc[ndx].ul_gpp_va = 0;
+ gpp_phys;
+ ae_proc[ndx].gpp_va = 0;
/* 1 MB */
ae_proc[ndx].ul_size = 0x100000;
dev_dbg(bridge, "shm MMU entry PA %x "
status = hio_mgr->intf_fxns->brd_mem_map
(hio_mgr->hbridge_context,
hio_mgr->ext_proc_info.ty_tlb[i].
- ul_gpp_phys,
+ gpp_phys,
hio_mgr->ext_proc_info.ty_tlb[i].
dsp_virt, 0x100000, map_attrs,
NULL);
for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
ae_proc[i].dsp_va = 0;
ae_proc[i].gpp_pa = 0;
- ae_proc[i].ul_gpp_va = 0;
+ ae_proc[i].gpp_va = 0;
ae_proc[i].ul_size = 0;
}
/*
* to the virtual uncached ioremapped address of shm reserved
* on MPU.
*/
- hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys =
+ hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys =
(ul_gpp_va + ul_seg1_size + ul_pad_size);
/*
* Need shm Phys addr. IO supports only one DSP for now:
* num_procs = 1.
*/
- if (!hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys || num_procs != 1) {
+ if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys || num_procs != 1) {
status = -EFAULT;
goto func_end;
} else {
ae_proc);
if (status)
goto func_end;
- ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
+ ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
ul_shm_base += ul_shm_base_offset;
ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
ul_mem_length);
goto func_end;
}
- hio_mgr->ul_gpp_read_pointer = hio_mgr->ul_trace_buffer_begin =
+ hio_mgr->gpp_read_pointer = hio_mgr->ul_trace_buffer_begin =
(ul_gpp_va + ul_seg1_size + ul_pad_size) +
(hio_mgr->ul_trace_buffer_begin - ul_dsp_va);
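+	/*
+	 * ul_trace_buffer_begin held a DSP virtual address; the statement
+	 * above rebases it into the MPU's uncached shm mapping and starts
+	 * the GPP read pointer at the beginning of the trace buffer.
+	 */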
/* Get the end address of trace buffer */
status = -ENOMEM;
hio_mgr->dsp_va = ul_dsp_va;
- hio_mgr->ul_gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
+ hio_mgr->gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
#endif
func_end:
goto func_end;
}
/* First TLB entry reserved for Bridge SM use. */
- ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
+ ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
/* Get size in bytes */
ul_dsp_virt =
hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt *
ul_gpp_cur_pointer =
*(u32 *) (hio_mgr->ul_trace_buffer_current);
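+		/*
+		 * The DSP publishes its current write position as a DSP
+		 * virtual address; rebase it against the gpp_va/dsp_va
+		 * pair saved at init to get the GPP virtual address of
+		 * the same location.
+		 */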
ul_gpp_cur_pointer =
- hio_mgr->ul_gpp_va + (ul_gpp_cur_pointer -
+ hio_mgr->gpp_va + (ul_gpp_cur_pointer -
hio_mgr->dsp_va);
/* No new debug messages available yet */
- if (ul_gpp_cur_pointer == hio_mgr->ul_gpp_read_pointer) {
+ if (ul_gpp_cur_pointer == hio_mgr->gpp_read_pointer) {
break;
- } else if (ul_gpp_cur_pointer > hio_mgr->ul_gpp_read_pointer) {
+ } else if (ul_gpp_cur_pointer > hio_mgr->gpp_read_pointer) {
/* Continuous data */
ul_new_message_length =
- ul_gpp_cur_pointer - hio_mgr->ul_gpp_read_pointer;
+ ul_gpp_cur_pointer - hio_mgr->gpp_read_pointer;
memcpy(hio_mgr->pmsg,
- (char *)hio_mgr->ul_gpp_read_pointer,
+ (char *)hio_mgr->gpp_read_pointer,
ul_new_message_length);
hio_mgr->pmsg[ul_new_message_length] = '\0';
/*
* Advance the GPP trace pointer to DSP current
* pointer.
*/
- hio_mgr->ul_gpp_read_pointer += ul_new_message_length;
+ hio_mgr->gpp_read_pointer += ul_new_message_length;
/* Print the trace messages */
pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
- } else if (ul_gpp_cur_pointer < hio_mgr->ul_gpp_read_pointer) {
+ } else if (ul_gpp_cur_pointer < hio_mgr->gpp_read_pointer) {
/* Handle trace buffer wraparound */
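+			/*
+			 * The DSP pointer wrapped past the end of the ring
+			 * buffer: copy the tail (read pointer up to
+			 * ul_trace_buffer_end) first, then the head
+			 * (ul_trace_buffer_begin up to the current pointer),
+			 * so pmsg receives the message in order.
+			 */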
memcpy(hio_mgr->pmsg,
- (char *)hio_mgr->ul_gpp_read_pointer,
+ (char *)hio_mgr->gpp_read_pointer,
hio_mgr->ul_trace_buffer_end -
- hio_mgr->ul_gpp_read_pointer);
+ hio_mgr->gpp_read_pointer);
ul_new_message_length =
ul_gpp_cur_pointer - hio_mgr->ul_trace_buffer_begin;
memcpy(&hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
- hio_mgr->ul_gpp_read_pointer],
+ hio_mgr->gpp_read_pointer],
(char *)hio_mgr->ul_trace_buffer_begin,
ul_new_message_length);
hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
- hio_mgr->ul_gpp_read_pointer +
+ hio_mgr->gpp_read_pointer +
ul_new_message_length] = '\0';
/*
* Advance the GPP trace pointer to DSP current
* pointer.
*/
- hio_mgr->ul_gpp_read_pointer =
+ hio_mgr->gpp_read_pointer =
hio_mgr->ul_trace_buffer_begin +
ul_new_message_length;
/* Print the trace messages */
ul_shm_offset_virt =
ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
/* Kernel logical address */
- ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
+ ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;
DBC_ASSERT(ul_shm_base != 0);
/* 2nd wd is used as sync field */
dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
dw_ext_prog_virt_mem =
- dev_context->atlb_entry[0].ul_gpp_va;
+ dev_context->atlb_entry[0].gpp_va;
if (!trace_read) {
ul_shm_offset_virt =
ul_shm_base_virt - ul_tlb_base_virt;
if (trace_load) {
dw_ext_prog_virt_mem =
- dev_context->atlb_entry[0].ul_gpp_va;
+ dev_context->atlb_entry[0].gpp_va;
} else {
dw_ext_prog_virt_mem = host_res->mem_base[1];
dw_ext_prog_virt_mem +=
* Requires:
* cmm_init(void) called.
* ph_cmm_mgr != NULL.
- * mgr_attrts->ul_min_block_size >= 4 bytes.
+ * mgr_attrts->min_block_size >= 4 bytes.
* Ensures:
*
*/
/* Cmm attributes used in cmm_create() */
struct cmm_mgrattrs {
/* Minimum SM allocation; default 32 bytes. */
- u32 ul_min_block_size;
+ u32 min_block_size;
};
/* Attributes for CMM_AllocBuf() & CMM_AllocDesc() */
struct cmm_attrs {
u32 ul_seg_id; /* 1,2... are SM segments. 0 is not. */
- u32 alignment; /* 0,1,2,4....ul_min_block_size */
+ u32 alignment; /* 0,1,2,4....min_block_size */
};
/*
/* Total size in bytes of segment: DSP+GPP */
u32 ul_total_seg_size;
u32 gpp_base_pa; /* Start Phys addr of Gpp SM seg */
- u32 ul_gpp_size; /* Size of Gpp SM seg in bytes */
+ u32 gpp_size; /* Size of Gpp SM seg in bytes */
u32 dsp_base_va; /* DSP virt base byte address */
u32 dsp_size; /* DSP seg size in bytes */
/* # of current GPP allocations from this segment */
- u32 ul_in_use_cnt;
+ u32 in_use_cnt;
u32 seg_base_va; /* Start Virt address of SM seg */
};
/* CMM useful information */
struct cmm_info {
/* # of SM segments registered with this Cmm. */
- u32 ul_num_gppsm_segs;
+ u32 num_gppsm_segs;
/* Total # of allocations outstanding for CMM */
u32 ul_total_in_use_cnt;
/* Min SM block size allocation from cmm_create() */
- u32 ul_min_block_size;
+ u32 min_block_size;
/* Info per registered SM segment. */
struct cmm_seginfo seg_info[CMM_MAXGPPSEGS];
};
struct dsp_memstat {
u32 ul_size;
u32 ul_total_free_size;
- u32 ul_len_max_free_block;
- u32 ul_num_free_blocks;
- u32 ul_num_alloc_blocks;
+ u32 len_max_free_block;
+ u32 num_free_blocks;
+ u32 num_alloc_blocks;
};
/* Processor Load information Values */
};
struct dsp_nodeprofs {
- u32 ul_heap_size;
+ u32 heap_size;
};
/* The dsp_ndbprops structure reports the attributes of a node */
int processor_family;
int processor_type;
u32 clock_rate;
- u32 ul_internal_mem_size;
+ u32 internal_mem_size;
u32 external_mem_size;
u32 processor_id;
int ty_running_rtos;
u32 cb_struct;
u32 number_bufs_allowed;
u32 number_bufs_in_stream;
- u32 ul_number_bytes;
+ u32 number_bytes;
void *sync_object_handle;
enum dsp_streamstate ss_stream_state;
};
u32 dsp_va; /* DSP virtual address */
u32 gpp_pa; /* GPP physical address */
/* GPP virtual address. __va does not work for ioremapped addresses */
- u32 ul_gpp_va;
+ u32 gpp_va;
u32 ul_size; /* Size of the mapped memory in bytes */
enum hw_endianism_t endianism;
enum hw_mmu_mixed_size_t mixed_mode;
struct mgr_tlbentry {
u32 dsp_virt; /* DSP virtual address */
- u32 ul_gpp_phys; /* GPP physical address */
+ u32 gpp_phys; /* GPP physical address */
};
/*
*/
struct mutex cmm_lock; /* Lock to access cmm mgr */
struct list_head node_free_list; /* Free list of memory nodes */
- u32 ul_min_block_size; /* Min SM block; default 16 bytes */
+ u32 min_block_size; /* Min SM block; default 16 bytes */
u32 page_size; /* Memory Page size (1k/4k) */
/* GPP SM segment ptrs */
struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
/* Default CMM Mgr attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
- /* ul_min_block_size, min block size(bytes) allocated by cmm mgr */
+ /* min_block_size, min block size(bytes) allocated by cmm mgr */
16
};
/* get the allocator object for this segment id */
allocator =
get_allocator(cmm_mgr_obj, pattrs->ul_seg_id);
- /* keep block size a multiple of ul_min_block_size */
+ /* keep block size a multiple of min_block_size */
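+	/* (rounds up; relies on min_block_size being a power of two) */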
usize =
- ((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size -
+ ((usize - 1) & ~(cmm_mgr_obj->min_block_size -
1))
- + cmm_mgr_obj->ul_min_block_size;
+ + cmm_mgr_obj->min_block_size;
mutex_lock(&cmm_mgr_obj->cmm_lock);
pnode = get_free_block(allocator, usize);
}
if (pnode) {
delta_size = (pnode->ul_size - usize);
- if (delta_size >= cmm_mgr_obj->ul_min_block_size) {
+ if (delta_size >= cmm_mgr_obj->min_block_size) {
/* create a new block with the leftovers and
* add to freelist */
new_node =
mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */
/* 4 bytes minimum */
- DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4);
+ DBC_ASSERT(mgr_attrts->min_block_size >= 4);
/* save away smallest block allocation for this cmm mgr */
- cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size;
+ cmm_obj->min_block_size = mgr_attrts->min_block_size;
cmm_obj->page_size = PAGE_SIZE;
/* create node free list */
return status;
}
mutex_lock(&cmm_mgr_obj->cmm_lock);
- cmm_info_obj->ul_num_gppsm_segs = 0; /* # of SM segments */
+ cmm_info_obj->num_gppsm_segs = 0; /* # of SM segments */
/* Total # of outstanding alloc */
cmm_info_obj->ul_total_in_use_cnt = 0;
/* min block size */
- cmm_info_obj->ul_min_block_size = cmm_mgr_obj->ul_min_block_size;
+ cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size;
/* check SM memory segments */
for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
/* get the allocator object for this segment id */
altr = get_allocator(cmm_mgr_obj, ul_seg);
if (!altr)
continue;
- cmm_info_obj->ul_num_gppsm_segs++;
+ cmm_info_obj->num_gppsm_segs++;
cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
altr->shm_base - altr->dsp_size;
cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
altr->dsp_size + altr->ul_sm_size;
cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa =
altr->shm_base;
- cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size =
+ cmm_info_obj->seg_info[ul_seg - 1].gpp_size =
altr->ul_sm_size;
cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va =
altr->dsp_base;
altr->dsp_size;
cmm_info_obj->seg_info[ul_seg - 1].seg_base_va =
altr->vm_base - altr->dsp_size;
- cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0;
+ cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0;
list_for_each_entry(curr, &altr->in_use_list, link) {
cmm_info_obj->ul_total_in_use_cnt++;
- cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt++;
+ cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++;
}
}
mutex_unlock(&cmm_mgr_obj->cmm_lock);
}
/* Check if input ul_size is big enough to alloc at least one block */
- if (ul_size < cmm_mgr_obj->ul_min_block_size) {
+ if (ul_size < cmm_mgr_obj->min_block_size) {
status = -EINVAL;
goto func_end;
}
u32 open_ref; /* Number of times opened */
u32 load_ref; /* Number of times loaded */
struct gh_t_hash_tab *sym_tab; /* Hash table of symbols */
- u32 ul_pos;
+ u32 pos;
};
/*
} else {
(*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
- zl_lib->ul_pos,
+ zl_lib->pos,
SEEK_SET);
}
} else {
}
if (!status) {
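+		/* Save the file offset so the matching fseek() calls can
+		 * restore the cursor from zl_lib->pos later. */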
- zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell))
+ zl_lib->pos = (*(zl_lib->target_obj->attrs.ftell))
(zl_lib->fp);
/* Reset file cursor */
(*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
if (zl_lib == NULL) {
status = -ENOMEM;
} else {
- zl_lib->ul_pos = 0;
+ zl_lib->pos = 0;
/* Increment ref count to allow close on failure
* later on */
zl_lib->open_ref++;
if (!status && zl_lib->fp == NULL)
status = dof_open(zl_lib);
- zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp);
+ zl_lib->pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp);
(*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0, SEEK_SET);
/* Create a hash table for symbols if flag is set */
if (zl_lib->sym_tab != NULL || !(flags & DBLL_SYMB))
} else {
(*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
- zl_lib->ul_pos,
+ zl_lib->pos,
SEEK_SET);
}
} else {
/* Heap Size for the node */
gen_obj->obj_data.node_obj.
ndb_props.node_profiles[i].
- ul_heap_size = atoi(token);
+ heap_size = atoi(token);
}
}
}
gen_obj->obj_data.proc_info.clock_rate = atoi(token);
token = strsep(&psz_cur, seps);
- gen_obj->obj_data.proc_info.ul_internal_mem_size = atoi(token);
+ gen_obj->obj_data.proc_info.internal_mem_size = atoi(token);
token = strsep(&psz_cur, seps);
gen_obj->obj_data.proc_info.external_mem_size = atoi(token);
for (entry_id = 0; entry_id < 7; entry_id++) {
token = strsep(&psz_cur, seps);
gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id].
- ul_gpp_phys = atoi(token);
+ gpp_phys = atoi(token);
token = strsep(&psz_cur, seps);
gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id].
struct msg_mgr *msg_mgr_obj;
/* Processor properties needed by Node Dispatcher */
- u32 ul_num_chnls; /* Total number of channels */
+ u32 num_chnls; /* Total number of channels */
u32 chnl_offset; /* Offset of chnl ids rsvd for RMS */
u32 chnl_buf_size; /* Buffer size for data to RMS */
int proc_family; /* eg, 5000 */
set_bit(chnl_id, hnode_mgr->dma_chnl_map);
/* dma chans are 2nd transport chnl set
 * ids (e.g. 16-31) */
- chnl_id = chnl_id + hnode_mgr->ul_num_chnls;
+ chnl_id = chnl_id + hnode_mgr->num_chnls;
}
break;
case STRMMODE_ZEROCOPY:
/* zero-copy chans are 3rd transport set
 * (e.g. 32-47) */
chnl_id = chnl_id +
- (2 * hnode_mgr->ul_num_chnls);
+ (2 * hnode_mgr->num_chnls);
}
break;
case STRMMODE_PROCCOPY:
set_bit(stream.dev_id, hnode_mgr->pipe_done_map);
}
} else if (stream.type == HOSTCONNECT) {
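+		/*
+		 * Host-connect device ids are banked per transport: the
+		 * first num_chnls ids are proc-copy, the next num_chnls
+		 * are DSP-DMA, and the next num_chnls are zero-copy;
+		 * strip the bank offset before clearing the bitmap.
+		 */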
- if (stream.dev_id < hnode_mgr->ul_num_chnls) {
+ if (stream.dev_id < hnode_mgr->num_chnls) {
clear_bit(stream.dev_id, hnode_mgr->chnl_map);
- } else if (stream.dev_id < (2 * hnode_mgr->ul_num_chnls)) {
+ } else if (stream.dev_id < (2 * hnode_mgr->num_chnls)) {
/* dsp-dma */
- clear_bit(stream.dev_id - (1 * hnode_mgr->ul_num_chnls),
+ clear_bit(stream.dev_id - (1 * hnode_mgr->num_chnls),
hnode_mgr->dma_chnl_map);
- } else if (stream.dev_id < (3 * hnode_mgr->ul_num_chnls)) {
+ } else if (stream.dev_id < (3 * hnode_mgr->num_chnls)) {
/* zero-copy */
- clear_bit(stream.dev_id - (2 * hnode_mgr->ul_num_chnls),
+ clear_bit(stream.dev_id - (2 * hnode_mgr->num_chnls),
hnode_mgr->zc_chnl_map);
}
}
return -EPERM;
hnode_mgr->chnl_offset = host_res->chnl_offset;
hnode_mgr->chnl_buf_size = host_res->chnl_buf_size;
- hnode_mgr->ul_num_chnls = host_res->num_chnls;
+ hnode_mgr->num_chnls = host_res->num_chnls;
/*
* PROC will add an API to get dsp_processorinfo.
/* ul_size */
mem_stat_buf->ul_size = target->seg_tab[segid].length;
- /* ul_num_free_blocks */
- mem_stat_buf->ul_num_free_blocks = free_blocks;
+ /* num_free_blocks */
+ mem_stat_buf->num_free_blocks = free_blocks;
/* ul_total_free_size */
mem_stat_buf->ul_total_free_size = total_free_size;
- /* ul_len_max_free_block */
- mem_stat_buf->ul_len_max_free_block = max_free_size;
+ /* len_max_free_block */
+ mem_stat_buf->len_max_free_block = max_free_size;
- /* ul_num_alloc_blocks */
- mem_stat_buf->ul_num_alloc_blocks =
+ /* num_alloc_blocks */
+ mem_stat_buf->num_alloc_blocks =
target->seg_tab[segid].number;
ret = true;
u32 utimeout;
u32 num_bufs; /* Max # of bufs allowed in stream */
u32 un_bufs_in_strm; /* Current # of bufs in stream */
- u32 ul_n_bytes; /* bytes transferred since idled */
+ u32 bytes; /* bytes transferred since idled */
/* STREAM_IDLE, STREAM_READY, ... */
enum dsp_streamstate strm_state;
void *user_event; /* Saved for strm_get_info() */
stream_info->user_strm->number_bufs_in_stream = chnl_info_obj.cio_cs +
chnl_info_obj.cio_reqs;
/* # of bytes transferred since last call to DSPStream_Idle() */
- stream_info->user_strm->ul_number_bytes = chnl_info_obj.bytes_tx;
+ stream_info->user_strm->number_bytes = chnl_info_obj.bytes_tx;
stream_info->user_strm->sync_object_handle = chnl_info_obj.event_obj;
/* Determine stream state based on channel state and info */
if (chnl_info_obj.state & CHNL_STATEEOS) {