*/
u32 dsp_ext_base_addr; /* See the comment above */
u32 api_reg_base; /* API mem map'd registers */
- void __iomem *dw_dsp_mmu_base; /* DSP MMU Mapped registers */
+ void __iomem *dsp_mmu_base; /* DSP MMU Mapped registers */
u32 api_clk_base; /* CLK Registers */
u32 dsp_clk_m2_base; /* DSP Clock Module m2 */
- u32 dw_public_rhea; /* Pub Rhea */
- u32 dw_int_addr; /* MB INTR reg */
+ u32 public_rhea; /* Pub Rhea */
+ u32 int_addr; /* MB INTR reg */
u32 dw_tc_endianism; /* TC Endianism register */
u32 dw_test_base; /* DSP MMU Mapped registers */
u32 dw_self_loop; /* Pointer to the selfloop */
u32 dsp_start_add; /* API Boot vector */
- u32 dw_internal_size; /* Internal memory size */
+ u32 internal_size; /* Internal memory size */
struct omap_mbox *mbox; /* Mail box handle */
} else {
/* Record that we no longer have output buffers
* available: */
- chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
+ chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
}
}
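/*
 * A minimal sketch (not driver code) of the output_mask bookkeeping used
 * above: bit N is set while channel N has a full output buffer queued, and
 * cleared, as here, once the last buffer is consumed.
 */
static inline void chnl_output_ready(u32 *output_mask, u32 chnl_id)
{
	*output_mask |= (1 << chnl_id);		/* buffer queued for output */
}

static inline void chnl_output_idle(u32 *output_mask, u32 chnl_id)
{
	*output_mask &= ~(1 << chnl_id);	/* no output buffers left */
}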
/* Move all IORs to the IOC queue: */
/* Total # chnls supported */
chnl_mgr_obj->max_channels = max_channels;
chnl_mgr_obj->open_channels = 0;
- chnl_mgr_obj->dw_output_mask = 0;
- chnl_mgr_obj->dw_last_output = 0;
+ chnl_mgr_obj->output_mask = 0;
+ chnl_mgr_obj->last_output = 0;
chnl_mgr_obj->hdev_obj = hdev_obj;
spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
} else {
channel_info->hchnl_mgr = pchnl->chnl_mgr_obj;
channel_info->event_obj = pchnl->user_event;
channel_info->cnhl_id = pchnl->chnl_id;
- channel_info->dw_mode = pchnl->chnl_mode;
+ channel_info->mode = pchnl->chnl_mode;
channel_info->bytes_tx = pchnl->bytes_moved;
channel_info->process = pchnl->process;
channel_info->sync_event = pchnl->sync_event;
/* The first MMU TLB entry (TLB_0) in DCD is ShmBase. */
ndx = 0;
- ul_gpp_pa = host_res->dw_mem_phys[1];
- ul_gpp_va = host_res->dw_mem_base[1];
+ ul_gpp_pa = host_res->mem_phys[1];
+ ul_gpp_va = host_res->mem_base[1];
/* This is the virtual uncached ioremapped address!!! */
/* Why can't we directly take the DSPVA from the symbols? */
ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt;
ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);
if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
- host_res->dw_mem_length[1]) {
+ host_res->mem_length[1]) {
pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
- __func__, host_res->dw_mem_length[1],
+ __func__, host_res->mem_length[1],
ul_seg_size + ul_seg1_size + ul_pad_size);
status = -ENOMEM;
}
* Record the fact that we have a buffer available for
* output.
*/
- chnl_mgr_obj->dw_output_mask |= (1 << pchnl->chnl_id);
+ chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id);
} else {
DBC_ASSERT(io_mode); /* Shouldn't get here. */
}
u32 shift;
id = (pchnl !=
- NULL ? pchnl->chnl_id : (chnl_mgr_obj->dw_last_output + 1));
+ NULL ? pchnl->chnl_id : (chnl_mgr_obj->last_output + 1));
id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
if (id >= CHNL_MAXCHANNELS)
goto func_end;
if (mask & shift) {
ret = id;
if (pchnl == NULL)
- chnl_mgr_obj->dw_last_output = id;
+ chnl_mgr_obj->last_output = id;
break;
}
id = id + 1;
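/*
 * Hedged sketch of the round-robin scan above (find_ready_output): start one
 * past last_output, probe each bit of the ready mask, and wrap at
 * CHNL_MAXCHANNELS so every channel gets a fair turn. The constants mirror
 * the driver's; the helper itself is hypothetical.
 */
static u32 next_ready_output(u32 mask, u32 last_output)
{
	u32 id = (last_output + 1 == CHNL_MAXCHANNELS) ? 0 : last_output + 1;
	u32 n;

	for (n = 0; n < CHNL_MAXCHANNELS; n++) {
		if (mask & (1 << id))
			return id;	/* this channel has a buffer ready */
		id = (id + 1 == CHNL_MAXCHANNELS) ? 0 : id + 1;
	}
	return OUTPUTNOTREADY;		/* no ready channel in the mask */
}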
dw_dsp_f_mask = sm->dsp_free_mask;
chnl_id =
find_ready_output(chnl_mgr_obj, pchnl,
- (chnl_mgr_obj->dw_output_mask & dw_dsp_f_mask));
+ (chnl_mgr_obj->output_mask & dw_dsp_f_mask));
if (chnl_id == OUTPUTNOTREADY)
goto func_end;
/* Record the fact that no more I/O buffers are available */
if (list_empty(&pchnl->pio_requests))
- chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
+ chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
/* Transfer buffer to DSP side */
chnl_packet_obj->byte_size = min(pio_mgr->usm_buf_size,
dev_context->brd_state == BRD_HIBERNATION)
wake_dsp(dev_context, NULL);
- hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
+ hw_mmu_tlb_flush_all(dev_context->dsp_mmu_base);
}
static void bad_page_dump(u32 pa, struct page *pg)
}
/* change here to account for the 3 bands of the DSP internal memory */
if ((dsp_addr - dev_context->dsp_start_add) <
- dev_context->dw_internal_size) {
+ dev_context->internal_size) {
offset = dsp_addr - dev_context->dsp_start_add;
} else {
status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
udelay(100);
/* Disable the DSP MMU */
- hw_mmu_disable(resources->dw_dmmu_base);
+ hw_mmu_disable(resources->dmmu_base);
/* Disable TWL */
- hw_mmu_twl_disable(resources->dw_dmmu_base);
+ hw_mmu_twl_disable(resources->dmmu_base);
/* Only make TLB entry if both addresses are non-zero */
for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
e->ul_dsp_va,
e->ul_size);
- hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
+ hw_mmu_tlb_add(dev_context->dsp_mmu_base,
e->ul_gpp_pa,
e->ul_dsp_va,
e->ul_size,
/* Lock the above TLB entries and get the BIOS and load monitor timer
* information */
if (!status) {
- hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
- hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
- hw_mmu_ttb_set(resources->dw_dmmu_base,
+ hw_mmu_num_locked_set(resources->dmmu_base, itmp_entry_ndx);
+ hw_mmu_victim_num_set(resources->dmmu_base, itmp_entry_ndx);
+ hw_mmu_ttb_set(resources->dmmu_base,
dev_context->pt_attrs->l1_base_pa);
- hw_mmu_twl_enable(resources->dw_dmmu_base);
+ hw_mmu_twl_enable(resources->dmmu_base);
/* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
- temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
+ temp = __raw_readl((resources->dmmu_base) + 0x10);
temp = (temp & 0xFFFFFFEF) | 0x11;
- __raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
+ __raw_writel(temp, (resources->dmmu_base) + 0x10);
/* Let the DSP MMU run */
- hw_mmu_enable(resources->dw_dmmu_base);
+ hw_mmu_enable(resources->dmmu_base);
/* Enable the BIOS clock */
(void)dev_get_symbol(dev_context->hdev_obj,
}
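/*
 * Sketch of the MMU_SYSCONFIG read-modify-write above. The 0x10 offset and
 * the mask/OR values mirror the snippet (clear bit 4, then set bits 4 and 0,
 * i.e. SmartIdle plus AutoIdle per the driver's comment); the helper is
 * illustrative, not a documented API.
 */
static void dsp_mmu_set_idlemodes(void __iomem *dmmu_base)
{
	u32 val = __raw_readl(dmmu_base + 0x10);	/* MMU_SYSCONFIG */

	val = (val & 0xFFFFFFEF) | 0x11;	/* SmartIdle | AutoIdle */
	__raw_writel(val, dmmu_base + 0x10);
}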
if (!status) {
/*PM_IVA2GRPSEL_PER = 0xC0;*/
- temp = readl(resources->dw_per_pm_base + 0xA8);
+ temp = readl(resources->per_pm_base + 0xA8);
temp = (temp & 0xFFFFFF30) | 0xC0;
- writel(temp, resources->dw_per_pm_base + 0xA8);
+ writel(temp, resources->per_pm_base + 0xA8);
/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
- temp = readl(resources->dw_per_pm_base + 0xA4);
+ temp = readl(resources->per_pm_base + 0xA4);
temp = (temp & 0xFFFFFF3F);
- writel(temp, resources->dw_per_pm_base + 0xA4);
+ writel(temp, resources->per_pm_base + 0xA4);
/*CM_SLEEPDEP_PER |= 0x04; */
- temp = readl(resources->dw_per_base + 0x44);
+ temp = readl(resources->per_base + 0x44);
temp = (temp & 0xFFFFFFFB) | 0x04;
- writel(temp, resources->dw_per_base + 0x44);
+ writel(temp, resources->per_base + 0x44);
/* CM_CLKSTCTRL_IVA2 = 0x00000003, to allow automatic transitions */
(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
/* Let DSP go */
dev_dbg(bridge, "%s Unreset\n", __func__);
/* Enable DSP MMU Interrupts */
- hw_mmu_event_enable(resources->dw_dmmu_base,
+ hw_mmu_event_enable(resources->dmmu_base,
HW_MMU_ALL_INTERRUPTS);
/* release the RST1, DSP starts executing now .. */
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
return status;
}
if ((dsp_addr - dev_context->dsp_start_add) <
- dev_context->dw_internal_size) {
+ dev_context->internal_size) {
status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
ul_num_bytes, mem_type);
} else {
dev_context->dsp_start_add = (u32) OMAP_GEM_BASE;
dev_context->dw_self_loop = (u32) NULL;
dev_context->dsp_per_clks = 0;
- dev_context->dw_internal_size = OMAP_DSP_SIZE;
+ dev_context->internal_size = OMAP_DSP_SIZE;
/* Clear dev context MMU table entries.
 * These are set by the bridge_io_on_loaded() call after the program is loaded. */
for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
}
dev_context->dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
(config_param->
- dw_mem_base
+ mem_base
[3]),
config_param->
- dw_mem_length
+ mem_length
[3]);
if (!dev_context->dsp_base_addr)
status = -EPERM;
udelay(5);
/* MMU address is obtained from the host
* resources struct */
- dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
+ dev_context->dsp_mmu_base = resources->dmmu_base;
}
if (!status) {
dev_context->hdev_obj = hdev_obj;
host_res = dev_context->resources;
shm_size = drv_datap->shm_size;
if (shm_size >= 0x10000) {
- if ((host_res->dw_mem_base[1]) &&
- (host_res->dw_mem_phys[1])) {
+ if ((host_res->mem_base[1]) &&
+ (host_res->mem_phys[1])) {
mem_free_phys_mem((void *)
- host_res->dw_mem_base
+ host_res->mem_base
[1],
- host_res->dw_mem_phys
+ host_res->mem_phys
[1], shm_size);
}
} else {
"mem_free_phys_mem\n", __func__,
status);
}
- host_res->dw_mem_base[1] = 0;
- host_res->dw_mem_phys[1] = 0;
-
- if (host_res->dw_mem_base[0])
- iounmap((void *)host_res->dw_mem_base[0]);
- if (host_res->dw_mem_base[2])
- iounmap((void *)host_res->dw_mem_base[2]);
- if (host_res->dw_mem_base[3])
- iounmap((void *)host_res->dw_mem_base[3]);
- if (host_res->dw_mem_base[4])
- iounmap((void *)host_res->dw_mem_base[4]);
- if (host_res->dw_dmmu_base)
- iounmap(host_res->dw_dmmu_base);
- if (host_res->dw_per_base)
- iounmap(host_res->dw_per_base);
- if (host_res->dw_per_pm_base)
- iounmap((void *)host_res->dw_per_pm_base);
+ host_res->mem_base[1] = 0;
+ host_res->mem_phys[1] = 0;
+
+ if (host_res->mem_base[0])
+ iounmap((void *)host_res->mem_base[0]);
+ if (host_res->mem_base[2])
+ iounmap((void *)host_res->mem_base[2]);
+ if (host_res->mem_base[3])
+ iounmap((void *)host_res->mem_base[3]);
+ if (host_res->mem_base[4])
+ iounmap((void *)host_res->mem_base[4]);
+ if (host_res->dmmu_base)
+ iounmap(host_res->dmmu_base);
+ if (host_res->per_base)
+ iounmap(host_res->per_base);
+ if (host_res->per_pm_base)
+ iounmap((void *)host_res->per_pm_base);
if (host_res->core_pm_base)
iounmap((void *)host_res->core_pm_base);
- host_res->dw_mem_base[0] = (u32) NULL;
- host_res->dw_mem_base[2] = (u32) NULL;
- host_res->dw_mem_base[3] = (u32) NULL;
- host_res->dw_mem_base[4] = (u32) NULL;
- host_res->dw_dmmu_base = NULL;
+ host_res->mem_base[0] = (u32) NULL;
+ host_res->mem_base[2] = (u32) NULL;
+ host_res->mem_base[3] = (u32) NULL;
+ host_res->mem_base[4] = (u32) NULL;
+ host_res->dmmu_base = NULL;
kfree(host_res);
}
copy_bytes, mem_type);
if (!status) {
if (dest_addr < (dev_context->dsp_start_add +
- dev_context->dw_internal_size)) {
+ dev_context->internal_size)) {
/* Write to Internal memory */
status = write_dsp_data(dev_ctxt, host_buf,
dest_addr, copy_bytes,
ul_bytes =
ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
if (dsp_addr < (dev_context->dsp_start_add +
- dev_context->dw_internal_size)) {
+ dev_context->internal_size)) {
status =
write_dsp_data(dev_ctxt, host_buff, dsp_addr,
ul_bytes, mem_type);
switch (clock_id) {
case BPWR_GP_TIMER5:
- iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
- mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ iva2_grpsel = readl(resources->per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->per_pm_base + 0xA4);
if (enable) {
iva2_grpsel |= OMAP3430_GRPSEL_GPT5_MASK;
mpu_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK;
mpu_grpsel |= OMAP3430_GRPSEL_GPT5_MASK;
iva2_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK;
}
- writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
- writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ writel(iva2_grpsel, resources->per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->per_pm_base + 0xA4);
break;
case BPWR_GP_TIMER6:
- iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
- mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ iva2_grpsel = readl(resources->per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->per_pm_base + 0xA4);
if (enable) {
iva2_grpsel |= OMAP3430_GRPSEL_GPT6_MASK;
mpu_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK;
mpu_grpsel |= OMAP3430_GRPSEL_GPT6_MASK;
iva2_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK;
}
- writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
- writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ writel(iva2_grpsel, resources->per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->per_pm_base + 0xA4);
break;
case BPWR_GP_TIMER7:
- iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
- mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ iva2_grpsel = readl(resources->per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->per_pm_base + 0xA4);
if (enable) {
iva2_grpsel |= OMAP3430_GRPSEL_GPT7_MASK;
mpu_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK;
mpu_grpsel |= OMAP3430_GRPSEL_GPT7_MASK;
iva2_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK;
}
- writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
- writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ writel(iva2_grpsel, resources->per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->per_pm_base + 0xA4);
break;
case BPWR_GP_TIMER8:
- iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
- mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ iva2_grpsel = readl(resources->per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->per_pm_base + 0xA4);
if (enable) {
iva2_grpsel |= OMAP3430_GRPSEL_GPT8_MASK;
mpu_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK;
mpu_grpsel |= OMAP3430_GRPSEL_GPT8_MASK;
iva2_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK;
}
- writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
- writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ writel(iva2_grpsel, resources->per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->per_pm_base + 0xA4);
break;
case BPWR_MCBSP1:
iva2_grpsel = readl(resources->core_pm_base + 0xA8);
writel(mpu_grpsel, resources->core_pm_base + 0xA4);
break;
case BPWR_MCBSP2:
- iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
- mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ iva2_grpsel = readl(resources->per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->per_pm_base + 0xA4);
if (enable) {
iva2_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK;
mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK;
mpu_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK;
iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK;
}
- writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
- writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ writel(iva2_grpsel, resources->per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->per_pm_base + 0xA4);
break;
case BPWR_MCBSP3:
- iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
- mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ iva2_grpsel = readl(resources->per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->per_pm_base + 0xA4);
if (enable) {
iva2_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK;
mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK;
mpu_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK;
iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK;
}
- writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
- writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ writel(iva2_grpsel, resources->per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->per_pm_base + 0xA4);
break;
case BPWR_MCBSP4:
- iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
- mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ iva2_grpsel = readl(resources->per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->per_pm_base + 0xA4);
if (enable) {
iva2_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK;
mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK;
mpu_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK;
iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK;
}
- writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
- writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ writel(iva2_grpsel, resources->per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->per_pm_base + 0xA4);
break;
case BPWR_MCBSP5:
- iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
- mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4);
+ iva2_grpsel = readl(resources->per_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->per_pm_base + 0xA4);
if (enable) {
iva2_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK;
mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK;
mpu_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK;
iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK;
}
- writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8);
- writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
+ writel(iva2_grpsel, resources->per_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->per_pm_base + 0xA4);
break;
}
}
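/*
 * Every case above repeats one read-modify-write pattern and differs only in
 * the GRPSEL mask and the PRM instance (per_pm_base for the GPT5-8 and
 * McBSP2-5 cases, core_pm_base for McBSP1). A hedged sketch of the common
 * helper such a refactor might use (name hypothetical):
 */
static void dsp_grpsel_route(u32 pm_base, u32 mask, bool enable)
{
	u32 iva2_grpsel = readl(pm_base + 0xA8);	/* PM_IVA2GRPSEL */
	u32 mpu_grpsel = readl(pm_base + 0xA4);		/* PM_MPUGRPSEL */

	if (enable) {
		iva2_grpsel |= mask;	/* route the wakeup event to the DSP */
		mpu_grpsel &= ~mask;
	} else {
		mpu_grpsel |= mask;	/* hand it back to the MPU */
		iva2_grpsel &= ~mask;
	}
	writel(iva2_grpsel, pm_base + 0xA8);
	writel(mpu_grpsel, pm_base + 0xA4);
}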
offset = dsp_addr - dev_context->dsp_start_add;
if (offset < base1) {
- dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[2],
- resources->dw_mem_length[2]);
+ dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[2],
+ resources->mem_length[2]);
} else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) {
- dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[3],
- resources->dw_mem_length[3]);
+ dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[3],
+ resources->mem_length[3]);
offset = offset - base2;
} else if (offset >= base2 + OMAP_DSP_MEM2_SIZE &&
offset < base3 + OMAP_DSP_MEM3_SIZE) {
- dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[4],
- resources->dw_mem_length[4]);
+ dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[4],
+ resources->mem_length[4]);
offset = offset - base3;
} else {
return -EPERM;
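/*
 * Window lookup sketch matching the chain above: external DSP addresses fall
 * into one of three ioremapped windows (mem_base[2..4]), and the offset into
 * the chosen window is the DSP offset minus that window's base. base1/base2/
 * base3 and the OMAP_DSP_MEM*_SIZE bounds are the same values the driver
 * computes; the helper itself is hypothetical.
 */
static int dsp_offset_to_window(u32 offset, u32 base1, u32 base2, u32 base3,
				u32 *win_offset)
{
	if (offset < base1) {
		*win_offset = offset;
		return 2;		/* resources->mem_base[2] */
	}
	if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) {
		*win_offset = offset - base2;
		return 3;		/* resources->mem_base[3] */
	}
	if (offset >= base2 + OMAP_DSP_MEM2_SIZE &&
	    offset < base3 + OMAP_DSP_MEM3_SIZE) {
		*win_offset = offset - base3;
		return 4;		/* resources->mem_base[4] */
	}
	return -EPERM;			/* outside all windows */
}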
dw_ext_prog_virt_mem =
dev_context->atlb_entry[0].ul_gpp_va;
} else {
- dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
+ dw_ext_prog_virt_mem = host_res->mem_base[1];
dw_ext_prog_virt_mem +=
(ul_ext_base - ul_dyn_ext_base);
}
omap_mbox_restore_ctx(dev_context->mbox);
/* Access MMU SYS CONFIG register to generate a short wakeup */
- temp = readl(resources->dw_dmmu_base + 0x10);
+ temp = readl(resources->dmmu_base + 0x10);
dev_context->brd_state = BRD_RUNNING;
} else if (dev_context->brd_state == BRD_RETENTION) {
return IRQ_HANDLED;
}
- hw_mmu_event_status(resources->dw_dmmu_base, &event);
+ hw_mmu_event_status(resources->dmmu_base, &event);
if (event == HW_MMU_TRANSLATION_FAULT) {
- hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr);
+ hw_mmu_fault_addr_read(resources->dmmu_base, &fault_addr);
dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__,
event, fault_addr);
/*
/* Disable the MMU events; otherwise, once the fault is cleared
 * they start raising interrupts again */
- hw_mmu_event_disable(resources->dw_dmmu_base,
+ hw_mmu_event_disable(resources->dmmu_base,
HW_MMU_TRANSLATION_FAULT);
} else {
- hw_mmu_event_disable(resources->dw_dmmu_base,
+ hw_mmu_event_disable(resources->dmmu_base,
HW_MMU_ALL_INTERRUPTS);
}
return IRQ_HANDLED;
* access entry #0. Then add a new entry so that the DSP OS
* can continue in order to dump the stack.
*/
- hw_mmu_twl_disable(resources->dw_dmmu_base);
- hw_mmu_tlb_flush_all(resources->dw_dmmu_base);
+ hw_mmu_twl_disable(resources->dmmu_base);
+ hw_mmu_tlb_flush_all(resources->dmmu_base);
- hw_mmu_tlb_add(resources->dw_dmmu_base,
+ hw_mmu_tlb_add(resources->dmmu_base,
virt_to_phys(dummy_va_addr), fault_addr,
HW_PAGE_SIZE4KB, 1,
&map_attrs, HW_SET, HW_SET);
dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
/* Clear MMU interrupt */
- hw_mmu_event_ack(resources->dw_dmmu_base,
+ hw_mmu_event_ack(resources->dmmu_base,
HW_MMU_TRANSLATION_FAULT);
dump_dsp_stack(dev_context);
dsp_clk_disable(DSP_CLK_GPT8);
- hw_mmu_disable(resources->dw_dmmu_base);
+ hw_mmu_disable(resources->dmmu_base);
free_page((unsigned long)dummy_va_addr);
}
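/*
 * Recovery sequence sketch, as implemented above: disable table walking,
 * flush the TLB, pin a dummy 4 KB page at the faulting address so the DSP
 * can keep running long enough to dump its stack, wait on GPT8, ack the
 * translation fault, dump the stack, then disable the MMU and free the page.
 */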
#endif
struct dev_object *hdev_obj;
/* These fields initialized in bridge_chnl_create(): */
- u32 dw_output_mask; /* Host output channels w/ full buffers */
- u32 dw_last_output; /* Last output channel fired from DPC */
+ u32 output_mask; /* Host output channels w/ full buffers */
+ u32 last_output; /* Last output channel fired from DPC */
/* Critical section object handle */
spinlock_t chnl_mgr_lock;
u32 word_size; /* Size in bytes of DSP word */
struct cfg_hostres {
u32 num_mem_windows; /* Set to default */
/* This is the base.memory */
- u32 dw_mem_base[CFG_MAXMEMREGISTERS]; /* shm virtual address */
- u32 dw_mem_length[CFG_MAXMEMREGISTERS]; /* Length of the Base */
- u32 dw_mem_phys[CFG_MAXMEMREGISTERS]; /* shm Physical address */
+ u32 mem_base[CFG_MAXMEMREGISTERS]; /* shm virtual address */
+ u32 mem_length[CFG_MAXMEMREGISTERS]; /* Length of the Base */
+ u32 mem_phys[CFG_MAXMEMREGISTERS]; /* shm Physical address */
u8 birq_registers; /* IRQ Number */
u8 birq_attrib; /* IRQ Attribute */
- u32 dw_offset_for_monitor; /* The Shared memory starts from
- * dw_mem_base + this offset */
+ u32 offset_for_monitor; /* The Shared memory starts from
+ * mem_base + this offset */
/*
* Info needed by NODE for allocating channels to communicate with RMS:
* chnl_offset: Offset of RMS channels. Lower channels are
* reserved.
* chnl_buf_size: Size of channel buffer to send to RMS
- * dw_num_chnls: Total number of channels
+ * num_chnls: Total number of channels
* (including reserved).
*/
u32 chnl_offset;
u32 chnl_buf_size;
- u32 dw_num_chnls;
- void __iomem *dw_per_base;
- u32 dw_per_pm_base;
+ u32 num_chnls;
+ void __iomem *per_base;
+ u32 per_pm_base;
u32 core_pm_base;
- void __iomem *dw_dmmu_base;
+ void __iomem *dmmu_base;
};
#endif /* CFGDEFS_ */
void *event_obj; /* Channel I/O completion event. */
/* Abstraction of I/O completion event. */
struct sync_object *sync_event;
- s8 dw_mode; /* Channel mode. */
+ s8 mode; /* Channel mode. */
u8 dw_state; /* Current channel state. */
u32 bytes_tx; /* Total bytes transferred. */
u32 cio_cs; /* Number of IOCs in queue. */
*/
struct cmm_seginfo {
- u32 dw_seg_base_pa; /* Start Phys address of SM segment */
+ u32 seg_base_pa; /* Start Phys address of SM segment */
/* Total size in bytes of segment: DSP+GPP */
u32 ul_total_seg_size;
u32 gpp_base_pa; /* Start Phys addr of Gpp SM seg */
#include <dspbridge/cmm.h>
/* ----------------------------------- Defines, Data Structures, Typedefs */
-#define NEXT_PA(pnode) (pnode->dw_pa + pnode->ul_size)
+#define NEXT_PA(pnode) (pnode->pa + pnode->ul_size)
/* Other bus/platform translations */
#define DSPPA2GPPPA(base, x, y) ((x)+(y))
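/*
 * With the renamed field, NEXT_PA(pnode) expands to
 * pnode->pa + pnode->ul_size: the physical address one byte past the end of
 * the node's block, used below to detect physically adjacent blocks when
 * coalescing the free list.
 */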
struct mutex cmm_lock; /* Lock to access cmm mgr */
struct list_head node_free_list; /* Free list of memory nodes */
u32 ul_min_block_size; /* Min SM block; default 16 bytes */
- u32 dw_page_size; /* Memory Page size (1k/4k) */
+ u32 page_size; /* Memory Page size (1k/4k) */
/* GPP SM segment ptrs */
struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};
/* SM node representing a block of memory. */
struct cmm_mnode {
struct list_head link; /* must be 1st element */
- u32 dw_pa; /* Phys addr */
+ u32 pa; /* Phys addr */
u32 dw_va; /* Virtual address in device process context */
u32 ul_size; /* SM block size in bytes */
u32 client_proc; /* Process that allocated this mem block */
/* create a new block with the leftovers and
* add to freelist */
new_node =
- get_node(cmm_mgr_obj, pnode->dw_pa + usize,
+ get_node(cmm_mgr_obj, pnode->pa + usize,
pnode->dw_va + usize,
(u32) delta_size);
/* leftovers go free */
/* put our node on InUse list */
list_add_tail(&pnode->link, &allocator->in_use_list);
- buf_pa = (void *)pnode->dw_pa; /* physical address */
+ buf_pa = (void *)pnode->pa; /* physical address */
/* clear mem */
pbyte = (u8 *) pnode->dw_va;
for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
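		/* (loop body elided in this excerpt; it zeroes the block a
		 * byte at a time, i.e. it is equivalent to
		 * memset((void *)pnode->dw_va, 0, usize)) */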
DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4);
/* save away smallest block allocation for this cmm mgr */
cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size;
- cmm_obj->dw_page_size = PAGE_SIZE;
+ cmm_obj->page_size = PAGE_SIZE;
/* create node free list */
INIT_LIST_HEAD(&cmm_obj->node_free_list);
mutex_lock(&cmm_mgr_obj->cmm_lock);
list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) {
- if (curr->dw_pa == (u32) buf_pa) {
+ if (curr->pa == (u32) buf_pa) {
list_del(&curr->link);
add_to_free_list(allocator, curr);
status = 0;
if (!altr)
continue;
cmm_info_obj->ul_num_gppsm_segs++;
- cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_pa =
+ cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
altr->shm_base - altr->ul_dsp_size;
cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
altr->ul_dsp_size + altr->ul_sm_size;
list_del_init(&pnode->link);
}
- pnode->dw_pa = dw_pa;
+ pnode->pa = dw_pa;
pnode->dw_va = dw_va;
pnode->ul_size = ul_size;
}
list_for_each_entry(curr, &allocator->free_list, link) {
- if (NEXT_PA(curr) == node->dw_pa) {
+ if (NEXT_PA(curr) == node->pa) {
curr->ul_size += node->ul_size;
delete_node(allocator->hcmm_mgr, node);
return;
}
- if (curr->dw_pa == NEXT_PA(node)) {
- curr->dw_pa = node->dw_pa;
+ if (curr->pa == NEXT_PA(node)) {
+ curr->pa = node->pa;
curr->dw_va = node->dw_va;
curr->ul_size += node->ul_size;
delete_node(allocator->hcmm_mgr, node);
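/*
 * Coalescing test sketch matching the two branches above: a freed node merges
 * with a free-list neighbour when the blocks are physically adjacent, in
 * either order. Field names follow the renamed struct cmm_mnode; the helper
 * is hypothetical.
 */
static bool cmm_blocks_adjacent(struct cmm_mnode *a, struct cmm_mnode *b)
{
	return a->pa + a->ul_size == b->pa;	/* a ends where b begins */
}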
num_windows = host_res->num_mem_windows;
if (num_windows) {
/* Assume last memory window is for CHNL */
- io_mgr_attrs.shm_base = host_res->dw_mem_base[1] +
- host_res->dw_offset_for_monitor;
+ io_mgr_attrs.shm_base = host_res->mem_base[1] +
+ host_res->offset_for_monitor;
io_mgr_attrs.usm_length =
- host_res->dw_mem_length[1] -
- host_res->dw_offset_for_monitor;
+ host_res->mem_length[1] -
+ host_res->offset_for_monitor;
} else {
io_mgr_attrs.shm_base = 0;
io_mgr_attrs.usm_length = 0;
/* Device IOCtl function pointer */
struct api_cmd {
u32(*fxn) (union trapped_args *args, void *pr_ctxt);
- u32 dw_index;
+ u32 index;
};
/* ----------------------------------- Globals */
host_res->num_mem_windows = 2;
/* First window is for DSP internal memory */
- dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
- dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
- dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
+ dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]);
+ dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]);
+ dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);
/* The 24xx base port does not map the DSP internal
 * memory; TODO: do an ioremap here */
/* These are hard-coded values */
host_res->birq_registers = 0;
host_res->birq_attrib = 0;
- host_res->dw_offset_for_monitor = 0;
+ host_res->offset_for_monitor = 0;
host_res->chnl_offset = 0;
/* CHNL_MAXCHANNELS */
- host_res->dw_num_chnls = CHNL_MAXCHANNELS;
+ host_res->num_chnls = CHNL_MAXCHANNELS;
host_res->chnl_buf_size = 0x400;
return 0;
/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
host_res->num_mem_windows = 4;
- host_res->dw_mem_base[0] = 0;
- host_res->dw_mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
+ host_res->mem_base[0] = 0;
+ host_res->mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
OMAP_DSP_MEM1_SIZE);
- host_res->dw_mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
+ host_res->mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
OMAP_DSP_MEM2_SIZE);
- host_res->dw_mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
+ host_res->mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
OMAP_DSP_MEM3_SIZE);
- host_res->dw_per_base = ioremap(OMAP_PER_CM_BASE,
+ host_res->per_base = ioremap(OMAP_PER_CM_BASE,
OMAP_PER_CM_SIZE);
- host_res->dw_per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
+ host_res->per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
OMAP_PER_PRM_SIZE);
host_res->core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
OMAP_CORE_PRM_SIZE);
- host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
+ host_res->dmmu_base = ioremap(OMAP_DMMU_BASE,
OMAP_DMMU_SIZE);
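	/* Note: each ioremap() above can return NULL and would ideally be
	 * checked before use; the (u32) casts also discard the __iomem
	 * annotation, which sparse would flag. */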
- dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
- host_res->dw_mem_base[0]);
- dev_dbg(bridge, "dw_mem_base[1] 0x%x\n",
- host_res->dw_mem_base[1]);
- dev_dbg(bridge, "dw_mem_base[2] 0x%x\n",
- host_res->dw_mem_base[2]);
- dev_dbg(bridge, "dw_mem_base[3] 0x%x\n",
- host_res->dw_mem_base[3]);
- dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
- host_res->dw_mem_base[4]);
- dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
+ dev_dbg(bridge, "mem_base[0] 0x%x\n",
+ host_res->mem_base[0]);
+ dev_dbg(bridge, "mem_base[1] 0x%x\n",
+ host_res->mem_base[1]);
+ dev_dbg(bridge, "mem_base[2] 0x%x\n",
+ host_res->mem_base[2]);
+ dev_dbg(bridge, "mem_base[3] 0x%x\n",
+ host_res->mem_base[3]);
+ dev_dbg(bridge, "mem_base[4] 0x%x\n",
+ host_res->mem_base[4]);
+ dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);
shm_size = drv_datap->shm_size;
if (shm_size >= 0x10000) {
/* Allocate physically contiguous,
 * non-cacheable memory */
- host_res->dw_mem_base[1] =
+ host_res->mem_base[1] =
(u32) mem_alloc_phys_mem(shm_size, 0x100000,
&dma_addr);
- if (host_res->dw_mem_base[1] == 0) {
+ if (host_res->mem_base[1] == 0) {
status = -ENOMEM;
pr_err("shm reservation Failed\n");
} else {
- host_res->dw_mem_length[1] = shm_size;
- host_res->dw_mem_phys[1] = dma_addr;
+ host_res->mem_length[1] = shm_size;
+ host_res->mem_phys[1] = dma_addr;
dev_dbg(bridge, "%s: Bridge shm address 0x%x "
"dma_addr %x size %x\n", __func__,
- host_res->dw_mem_base[1],
+ host_res->mem_base[1],
dma_addr, shm_size);
}
}
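	/*
	 * Allocation contract sketch for the block above: mem_alloc_phys_mem()
	 * returns a kernel virtual address for physically contiguous,
	 * non-cacheable memory and reports the bus address through its last
	 * argument; both are recorded in window 1 and later released through
	 * mem_free_phys_mem() (see the teardown path earlier in this section).
	 */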
/* These are hard-coded values */
host_res->birq_registers = 0;
host_res->birq_attrib = 0;
- host_res->dw_offset_for_monitor = 0;
+ host_res->offset_for_monitor = 0;
host_res->chnl_offset = 0;
/* CHNL_MAXCHANNELS */
- host_res->dw_num_chnls = CHNL_MAXCHANNELS;
+ host_res->num_chnls = CHNL_MAXCHANNELS;
host_res->chnl_buf_size = 0x400;
dw_buff_size = sizeof(struct cfg_hostres);
}
goto func_end;
}
- ul_gpp_mem_base = (u32) host_res->dw_mem_base[1];
+ ul_gpp_mem_base = (u32) host_res->mem_base[1];
off_set = pul_value - dynext_base;
ul_stack_seg_addr = ul_gpp_mem_base + off_set;
ul_stack_seg_val = readl(ul_stack_seg_addr);
return -EPERM;
hnode_mgr->ul_chnl_offset = host_res->chnl_offset;
hnode_mgr->ul_chnl_buf_size = host_res->chnl_buf_size;
- hnode_mgr->ul_num_chnls = host_res->dw_num_chnls;
+ hnode_mgr->ul_num_chnls = host_res->num_chnls;
/*
* PROC will add an API to get dsp_processorinfo.