* This function sets the group selection bits while
* enabling/disabling.
*/
-void dsp_clk_wakeup_event_ctrl(u32 ClkId, bool enable);
+void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable);
#endif /* _TIOMAP_PWR_ */
return status;
}
-void dsp_clk_wakeup_event_ctrl(u32 ClkId, bool enable)
+void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
{
struct cfg_hostres *resources;
int status = 0;
if (!resources)
return;
- switch (ClkId) {
+ switch (clock_id) {
case BPWR_GP_TIMER5:
iva2_grpsel = (u32) *((reg_uword32 *)
((u32) (resources->dw_per_pm_base) +
int write_ext_dsp_data(struct bridge_dev_context *dev_context,
IN u8 *pbHostBuf, u32 dwDSPAddr,
u32 ul_num_bytes, u32 ulMemType,
- bool bDynamicLoad)
+ bool dynamic_load)
{
u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr;
u32 dw_offset = 0;
}
/* If dynamic, force remap/unmap */
- if ((bDynamicLoad || trace_load) && dw_base_addr) {
+ if ((dynamic_load || trace_load) && dw_base_addr) {
dw_base_addr = 0;
MEM_UNMAP_LINEAR_ADDRESS((void *)
dev_context->dw_dsp_ext_base_addr);
ret = dev_get_symbol(dev_context->hdev_obj,
SHMBASENAME, &ul_shm_base_virt);
DBC_ASSERT(ul_shm_base_virt != 0);
- if (bDynamicLoad) {
+ if (dynamic_load) {
if (DSP_SUCCEEDED(ret)) {
if (symbols_reloaded)
ret =
*((u32 *) pbHostBuf) = dw_base_addr + dw_offset;
}
/* Unmap here to force remap for other Ext loads */
- if ((bDynamicLoad || trace_load) && dev_context->dw_dsp_ext_base_addr) {
+ if ((dynamic_load || trace_load) && dev_context->dw_dsp_ext_base_addr) {
MEM_UNMAP_LINEAR_ADDRESS((void *)
dev_context->dw_dsp_ext_base_addr);
dev_context->dw_dsp_ext_base_addr = 0x0;
extern int write_ext_dsp_data(struct bridge_dev_context *dev_context,
IN u8 *pbHostBuf, u32 dwDSPAddr,
u32 ul_num_bytes, u32 ulMemType,
- bool bDynamicLoad);
+ bool dynamic_load);
/*
* ======== write_ext32_bit_dsp_data ========
#if defined(USE_LEVEL_1_MACROS)
-#define MMUMMU_SYSCONFIG_READ_REGISTER32(baseAddress)\
+#define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\
- __raw_readl((baseAddress)+MMU_MMU_SYSCONFIG_OFFSET))
+ __raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET))
-#define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(baseAddress, value)\
+#define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
- register u32 data = __raw_readl((baseAddress)+offset);\
+ register u32 data = __raw_readl((base_address)+offset);\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\
data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\
newValue <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
newValue &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
newValue |= data;\
- __raw_writel(newValue, baseAddress+offset);\
+ __raw_writel(newValue, base_address+offset);\
}
-#define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(baseAddress, value)\
+#define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
- register u32 data = __raw_readl((baseAddress)+offset);\
+ register u32 data = __raw_readl((base_address)+offset);\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\
data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\
newValue <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
newValue &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
newValue |= data;\
- __raw_writel(newValue, baseAddress+offset);\
+ __raw_writel(newValue, base_address+offset);\
}
-#define MMUMMU_IRQSTATUS_READ_REGISTER32(baseAddress)\
+#define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUSReadRegister32),\
- __raw_readl((baseAddress)+MMU_MMU_IRQSTATUS_OFFSET))
+ __raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET))
-#define MMUMMU_IRQSTATUS_WRITE_REGISTER32(baseAddress, value)\
+#define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\
- __raw_writel(newValue, (baseAddress)+offset);\
+ __raw_writel(newValue, (base_address)+offset);\
}
-#define MMUMMU_IRQENABLE_READ_REGISTER32(baseAddress)\
+#define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\
- __raw_readl((baseAddress)+MMU_MMU_IRQENABLE_OFFSET))
+ __raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET))
-#define MMUMMU_IRQENABLE_WRITE_REGISTER32(baseAddress, value)\
+#define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\
- __raw_writel(newValue, (baseAddress)+offset);\
+ __raw_writel(newValue, (base_address)+offset);\
}
-#define MMUMMU_WALKING_STTWL_RUNNING_READ32(baseAddress)\
+#define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\
- (((__raw_readl(((baseAddress)+(MMU_MMU_WALKING_ST_OFFSET))))\
+ (((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\
& MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\
MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET))
-#define MMUMMU_CNTLTWL_ENABLE_READ32(baseAddress)\
+#define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\
- (((__raw_readl(((baseAddress)+(MMU_MMU_CNTL_OFFSET)))) &\
+ (((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\
MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\
MMU_MMU_CNTL_TWL_ENABLE_OFFSET))
-#define MMUMMU_CNTLTWL_ENABLE_WRITE32(baseAddress, value)\
+#define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_CNTL_OFFSET;\
- register u32 data = __raw_readl((baseAddress)+offset);\
+ register u32 data = __raw_readl((base_address)+offset);\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\
data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\
newValue <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
newValue &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
newValue |= data;\
- __raw_writel(newValue, baseAddress+offset);\
+ __raw_writel(newValue, base_address+offset);\
}
-#define MMUMMU_CNTLMMU_ENABLE_WRITE32(baseAddress, value)\
+#define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_CNTL_OFFSET;\
- register u32 data = __raw_readl((baseAddress)+offset);\
+ register u32 data = __raw_readl((base_address)+offset);\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\
data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\
newValue <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
newValue &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
newValue |= data;\
- __raw_writel(newValue, baseAddress+offset);\
+ __raw_writel(newValue, base_address+offset);\
}
-#define MMUMMU_FAULT_AD_READ_REGISTER32(baseAddress)\
+#define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\
- __raw_readl((baseAddress)+MMU_MMU_FAULT_AD_OFFSET))
+ __raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET))
-#define MMUMMU_TTB_WRITE_REGISTER32(baseAddress, value)\
+#define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_TTB_OFFSET;\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\
- __raw_writel(newValue, (baseAddress)+offset);\
+ __raw_writel(newValue, (base_address)+offset);\
}
-#define MMUMMU_LOCK_READ_REGISTER32(baseAddress)\
+#define MMUMMU_LOCK_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\
- __raw_readl((baseAddress)+MMU_MMU_LOCK_OFFSET))
+ __raw_readl((base_address)+MMU_MMU_LOCK_OFFSET))
-#define MMUMMU_LOCK_WRITE_REGISTER32(baseAddress, value)\
+#define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_LOCK_OFFSET;\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\
- __raw_writel(newValue, (baseAddress)+offset);\
+ __raw_writel(newValue, (base_address)+offset);\
}
-#define MMUMMU_LOCK_BASE_VALUE_READ32(baseAddress)\
+#define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\
- (((__raw_readl(((baseAddress)+(MMU_MMU_LOCK_OFFSET)))) &\
+ (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
MMU_MMU_LOCK_BASE_VALUE_MASK) >>\
MMU_MMU_LOCK_BASE_VALUE_OFFSET))
-#define MMUMMU_LOCK_BASE_VALUE_WRITE32(baseAddress, value)\
+#define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_LOCK_OFFSET;\
- register u32 data = __raw_readl((baseAddress)+offset);\
+ register u32 data = __raw_readl((base_address)+offset);\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCKBaseValueWrite32);\
data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\
newValue <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
newValue &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
newValue |= data;\
- __raw_writel(newValue, baseAddress+offset);\
+ __raw_writel(newValue, base_address+offset);\
}
-#define MMUMMU_LOCK_CURRENT_VICTIM_READ32(baseAddress)\
+#define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\
- (((__raw_readl(((baseAddress)+(MMU_MMU_LOCK_OFFSET)))) &\
+ (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\
MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET))
-#define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(baseAddress, value)\
+#define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_LOCK_OFFSET;\
- register u32 data = __raw_readl((baseAddress)+offset);\
+ register u32 data = __raw_readl((base_address)+offset);\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\
data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\
newValue <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
newValue &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
newValue |= data;\
- __raw_writel(newValue, baseAddress+offset);\
+ __raw_writel(newValue, base_address+offset);\
}
#define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\
(((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\
(((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\
MMU_MMU_LOCK_CURRENT_VICTIM_MASK))
-#define MMUMMU_LD_TLB_READ_REGISTER32(baseAddress)\
+#define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\
- __raw_readl((baseAddress)+MMU_MMU_LD_TLB_OFFSET))
+ __raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET))
-#define MMUMMU_LD_TLB_WRITE_REGISTER32(baseAddress, value)\
+#define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_LD_TLB_OFFSET;\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\
- __raw_writel(newValue, (baseAddress)+offset);\
+ __raw_writel(newValue, (base_address)+offset);\
}
-#define MMUMMU_CAM_WRITE_REGISTER32(baseAddress, value)\
+#define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_CAM_OFFSET;\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\
- __raw_writel(newValue, (baseAddress)+offset);\
+ __raw_writel(newValue, (base_address)+offset);\
}
-#define MMUMMU_RAM_WRITE_REGISTER32(baseAddress, value)\
+#define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_RAM_OFFSET;\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\
- __raw_writel(newValue, (baseAddress)+offset);\
+ __raw_writel(newValue, (base_address)+offset);\
}
-#define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(baseAddress, value)\
+#define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\
- __raw_writel(newValue, (baseAddress)+offset);\
+ __raw_writel(newValue, (base_address)+offset);\
}
#endif /* USE_LEVEL_1_MACROS */
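/*
 * Illustrative sketch (not part of the original patch): every *_WRITE32
 * macro above expands to the same read-modify-write sequence on a 32-bit
 * MMU register. The hypothetical helper below shows that pattern once,
 * with the mask/shift pair passed in instead of hard-coded per field; it
 * assumes the usual kernel __raw_readl()/__raw_writel() accessors.
 */
static inline void mmu_field_write32(void __iomem *base_address, u32 offset,
				     u32 mask, u32 shift, u32 value)
{
	u32 data = __raw_readl(base_address + offset);	/* current contents */

	data &= ~mask;				/* clear the target field */
	data |= (value << shift) & mask;	/* merge in the new value */
	__raw_writel(data, base_address + offset);
}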
*
* INPUTS:
*
- * Identifier : baseAddress
+ * Identifier : base_address
* Type : const u32
* Description : Base Address of instance of MMU module
*
* METHOD: : Check the Input parameter and Flush a
* single entry in the TLB.
*/
-static hw_status mmu_flush_entry(const void __iomem *baseAddress);
+static hw_status mmu_flush_entry(const void __iomem *base_address);
/*
* FUNCTION : mmu_set_cam_entry
*
* INPUTS:
*
- * Identifier : baseAddress
+ * Identifier : base_address
* Type : const u32
* Description : Base Address of instance of MMU module
*
*
* METHOD: : Check the Input parameters and set the CAM entry.
*/
-static hw_status mmu_set_cam_entry(const void __iomem *baseAddress,
+static hw_status mmu_set_cam_entry(const void __iomem *base_address,
const u32 pageSize,
const u32 preservedBit,
const u32 validBit,
*
* INPUTS:
*
- * Identifier : baseAddress
+ * Identifier : base_address
* Type : const u32
* Description : Base Address of instance of MMU module
*
*
* METHOD: : Check the Input parameters and set the RAM entry.
*/
-static hw_status mmu_set_ram_entry(const void __iomem *baseAddress,
+static hw_status mmu_set_ram_entry(const void __iomem *base_address,
const u32 physicalAddr,
enum hw_endianism_t endianism,
enum hw_element_size_t element_size,
/* HW FUNCTIONS */
-hw_status hw_mmu_enable(const void __iomem *baseAddress)
+hw_status hw_mmu_enable(const void __iomem *base_address)
{
hw_status status = RET_OK;
- MMUMMU_CNTLMMU_ENABLE_WRITE32(baseAddress, HW_SET);
+ MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);
return status;
}
-hw_status hw_mmu_disable(const void __iomem *baseAddress)
+hw_status hw_mmu_disable(const void __iomem *base_address)
{
hw_status status = RET_OK;
- MMUMMU_CNTLMMU_ENABLE_WRITE32(baseAddress, HW_CLEAR);
+ MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);
return status;
}
-hw_status hw_mmu_num_locked_set(const void __iomem *baseAddress,
+hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
u32 numLockedEntries)
{
hw_status status = RET_OK;
- MMUMMU_LOCK_BASE_VALUE_WRITE32(baseAddress, numLockedEntries);
+ MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, numLockedEntries);
return status;
}
-hw_status hw_mmu_victim_num_set(const void __iomem *baseAddress,
+hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
u32 victimEntryNum)
{
hw_status status = RET_OK;
- MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(baseAddress, victimEntryNum);
+ MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victimEntryNum);
return status;
}
-hw_status hw_mmu_event_ack(const void __iomem *baseAddress, u32 irqMask)
+hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irqMask)
{
hw_status status = RET_OK;
- MMUMMU_IRQSTATUS_WRITE_REGISTER32(baseAddress, irqMask);
+ MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irqMask);
return status;
}
-hw_status hw_mmu_event_disable(const void __iomem *baseAddress, u32 irqMask)
+hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irqMask)
{
hw_status status = RET_OK;
u32 irq_reg;
- irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(baseAddress);
+ irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
- MMUMMU_IRQENABLE_WRITE_REGISTER32(baseAddress, irq_reg & ~irqMask);
+ MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irqMask);
return status;
}
-hw_status hw_mmu_event_enable(const void __iomem *baseAddress, u32 irqMask)
+hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irqMask)
{
hw_status status = RET_OK;
u32 irq_reg;
- irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(baseAddress);
+ irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
- MMUMMU_IRQENABLE_WRITE_REGISTER32(baseAddress, irq_reg | irqMask);
+ MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irqMask);
return status;
}
-hw_status hw_mmu_event_status(const void __iomem *baseAddress, u32 *irqMask)
+hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irqMask)
{
hw_status status = RET_OK;
- *irqMask = MMUMMU_IRQSTATUS_READ_REGISTER32(baseAddress);
+ *irqMask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);
return status;
}
-hw_status hw_mmu_fault_addr_read(const void __iomem *baseAddress, u32 *addr)
+hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
{
hw_status status = RET_OK;
/*Check the input Parameters */
- CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM,
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
/* read values from register */
- *addr = MMUMMU_FAULT_AD_READ_REGISTER32(baseAddress);
+ *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);
return status;
}
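/*
 * Illustrative sketch (not part of the original patch): a typical MMU fault
 * path strings the accessors above together -- read the IRQ status, fetch
 * the faulting address, then acknowledge the event. "mmu_base" is a
 * hypothetical ioremapped base; error handling is omitted.
 */
static void example_handle_mmu_fault(void __iomem *mmu_base)
{
	u32 irq_mask, fault_addr;

	hw_mmu_event_status(mmu_base, &irq_mask);	/* which events fired */
	hw_mmu_fault_addr_read(mmu_base, &fault_addr);	/* faulting address */
	hw_mmu_event_ack(mmu_base, irq_mask);		/* clear the status bits */
}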
-hw_status hw_mmu_ttb_set(const void __iomem *baseAddress, u32 TTBPhysAddr)
+hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 TTBPhysAddr)
{
hw_status status = RET_OK;
u32 load_ttb;
/*Check the input Parameters */
- CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM,
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
load_ttb = TTBPhysAddr & ~0x7FUL;
/* write values to register */
- MMUMMU_TTB_WRITE_REGISTER32(baseAddress, load_ttb);
+ MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);
return status;
}
-hw_status hw_mmu_twl_enable(const void __iomem *baseAddress)
+hw_status hw_mmu_twl_enable(const void __iomem *base_address)
{
hw_status status = RET_OK;
- MMUMMU_CNTLTWL_ENABLE_WRITE32(baseAddress, HW_SET);
+ MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);
return status;
}
-hw_status hw_mmu_twl_disable(const void __iomem *baseAddress)
+hw_status hw_mmu_twl_disable(const void __iomem *base_address)
{
hw_status status = RET_OK;
- MMUMMU_CNTLTWL_ENABLE_WRITE32(baseAddress, HW_CLEAR);
+ MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);
return status;
}
-hw_status hw_mmu_tlb_flush(const void __iomem *baseAddress, u32 virtualAddr,
+hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtualAddr,
u32 pageSize)
{
hw_status status = RET_OK;
/* Generate the 20-bit tag from virtual address */
virtual_addr_tag = ((virtualAddr & MMU_ADDR_MASK) >> 12);
- mmu_set_cam_entry(baseAddress, pg_size_bits, 0, 0, virtual_addr_tag);
+ mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
- mmu_flush_entry(baseAddress);
+ mmu_flush_entry(base_address);
return status;
}
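/*
 * Illustrative sketch (not part of the original patch): a minimal MMU
 * bring-up sequence built from the functions above. The base address and
 * the TTB physical address are hypothetical and return codes are ignored
 * for brevity.
 */
static void example_mmu_bringup(void __iomem *mmu_base, u32 ttb_phys)
{
	hw_mmu_ttb_set(mmu_base, ttb_phys);	/* point the MMU at its page table */
	hw_mmu_twl_enable(mmu_base);		/* allow hardware table walks */
	hw_mmu_enable(mmu_base);		/* switch the MMU on last */
}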
-hw_status hw_mmu_tlb_add(const void __iomem *baseAddress,
+hw_status hw_mmu_tlb_add(const void __iomem *base_address,
u32 physicalAddr,
u32 virtualAddr,
u32 pageSize,
enum hw_mmu_page_size_t mmu_pg_size;
/*Check the input Parameters */
- CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM,
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
CHECK_INPUT_RANGE_MIN0(pageSize, MMU_PAGE_MAX, RET_PARAM_OUT_OF_RANGE,
RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
return RET_FAIL;
}
- lock_reg = MMUMMU_LOCK_READ_REGISTER32(baseAddress);
+ lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
/* Generate the 20-bit tag from virtual address */
virtual_addr_tag = ((virtualAddr & MMU_ADDR_MASK) >> 12);
/* Write the fields in the CAM Entry Register */
- mmu_set_cam_entry(baseAddress, mmu_pg_size, preservedBit, validBit,
+ mmu_set_cam_entry(base_address, mmu_pg_size, preservedBit, validBit,
virtual_addr_tag);
/* Write the different fields of the RAM Entry Register */
/* endianism of the page,Element Size of the page (8, 16, 32, 64 bit) */
- mmu_set_ram_entry(baseAddress, physicalAddr, map_attrs->endianism,
+ mmu_set_ram_entry(base_address, physicalAddr, map_attrs->endianism,
map_attrs->element_size, map_attrs->mixed_size);
/* Update the MMU Lock Register */
/* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
- MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(baseAddress, entryNum);
+ MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entryNum);
/* Enable loading of an entry in TLB by writing 1
into LD_TLB_REG register */
- MMUMMU_LD_TLB_WRITE_REGISTER32(baseAddress, MMU_LOAD_TLB);
+ MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);
- MMUMMU_LOCK_WRITE_REGISTER32(baseAddress, lock_reg);
+ MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);
return status;
}
}
/* mmu_flush_entry */
-static hw_status mmu_flush_entry(const void __iomem *baseAddress)
+static hw_status mmu_flush_entry(const void __iomem *base_address)
{
hw_status status = RET_OK;
u32 flush_entry_data = 0x1;
/*Check the input Parameters */
- CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM,
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
/* write values to register */
- MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(baseAddress, flush_entry_data);
+ MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
return status;
}
/* mmu_set_cam_entry */
-static hw_status mmu_set_cam_entry(const void __iomem *baseAddress,
+static hw_status mmu_set_cam_entry(const void __iomem *base_address,
const u32 pageSize,
const u32 preservedBit,
const u32 validBit,
u32 mmu_cam_reg;
/*Check the input Parameters */
- CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM,
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
mmu_cam_reg = (virtual_addr_tag << 12);
(preservedBit << 3);
/* write values to register */
- MMUMMU_CAM_WRITE_REGISTER32(baseAddress, mmu_cam_reg);
+ MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);
return status;
}
/* mmu_set_ram_entry */
-static hw_status mmu_set_ram_entry(const void __iomem *baseAddress,
+static hw_status mmu_set_ram_entry(const void __iomem *base_address,
const u32 physicalAddr,
enum hw_endianism_t endianism,
enum hw_element_size_t element_size,
u32 mmu_ram_reg;
/*Check the input Parameters */
- CHECK_INPUT_PARAM(baseAddress, 0, RET_BAD_NULL_PARAM,
+ CHECK_INPUT_PARAM(base_address, 0, RET_BAD_NULL_PARAM,
RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
CHECK_INPUT_RANGE_MIN0(element_size, MMU_ELEMENTSIZE_MAX,
RET_PARAM_OUT_OF_RANGE, RES_MMU_BASE +
(mixed_size << 6));
/* write values to register */
- MMUMMU_RAM_WRITE_REGISTER32(baseAddress, mmu_ram_reg);
+ MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);
return status;
bool donotlockmpupage;
};
-extern hw_status hw_mmu_enable(const void __iomem *baseAddress);
+extern hw_status hw_mmu_enable(const void __iomem *base_address);
-extern hw_status hw_mmu_disable(const void __iomem *baseAddress);
+extern hw_status hw_mmu_disable(const void __iomem *base_address);
-extern hw_status hw_mmu_num_locked_set(const void __iomem *baseAddress,
+extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
u32 numLockedEntries);
-extern hw_status hw_mmu_victim_num_set(const void __iomem *baseAddress,
+extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
u32 victimEntryNum);
/* For MMU faults */
-extern hw_status hw_mmu_event_ack(const void __iomem *baseAddress,
+extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
u32 irqMask);
-extern hw_status hw_mmu_event_disable(const void __iomem *baseAddress,
+extern hw_status hw_mmu_event_disable(const void __iomem *base_address,
u32 irqMask);
-extern hw_status hw_mmu_event_enable(const void __iomem *baseAddress,
+extern hw_status hw_mmu_event_enable(const void __iomem *base_address,
u32 irqMask);
-extern hw_status hw_mmu_event_status(const void __iomem *baseAddress,
+extern hw_status hw_mmu_event_status(const void __iomem *base_address,
u32 *irqMask);
-extern hw_status hw_mmu_fault_addr_read(const void __iomem *baseAddress,
+extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address,
u32 *addr);
/* Set the TT base address */
-extern hw_status hw_mmu_ttb_set(const void __iomem *baseAddress,
+extern hw_status hw_mmu_ttb_set(const void __iomem *base_address,
u32 TTBPhysAddr);
-extern hw_status hw_mmu_twl_enable(const void __iomem *baseAddress);
+extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
-extern hw_status hw_mmu_twl_disable(const void __iomem *baseAddress);
+extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);
-extern hw_status hw_mmu_tlb_flush(const void __iomem *baseAddress,
+extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
u32 virtualAddr, u32 pageSize);
-extern hw_status hw_mmu_tlb_add(const void __iomem *baseAddress,
+extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
u32 physicalAddr,
u32 virtualAddr,
u32 pageSize,
* Destroy the communication memory manager object.
* Parameters:
* hcmm_mgr: Cmm Mgr handle.
- * bForce: Force deallocation of all cmm memory immediately if set TRUE.
+ * force: Force deallocation of all cmm memory immediately if set TRUE.
* If FALSE, the call fails with -EPERM while any allocations
* are still outstanding.
* Returns:
* Ensures:
* Memory resources used by Cmm Mgr are freed.
*/
-extern int cmm_destroy(struct cmm_object *hcmm_mgr, bool bForce);
+extern int cmm_destroy(struct cmm_object *hcmm_mgr, bool force);
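/*
 * Illustrative sketch (not part of the original patch): tearing down a CMM
 * manager with the renamed "force" flag. A non-forced destroy is attempted
 * first; if it fails with -EPERM because buffers are still allocated, the
 * forced variant reclaims everything. "cmm_mgr" is a hypothetical handle.
 */
static int example_cmm_teardown(struct cmm_object *cmm_mgr)
{
	int status = cmm_destroy(cmm_mgr, false);	/* refuse if buffers remain */

	if (status == -EPERM)
		status = cmm_destroy(cmm_mgr, true);	/* reclaim unconditionally */
	return status;
}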
/*
* ======== cmm_exit ========
* Delete translator resources
* Parameters:
* xlator: handle to translator.
- * bForce: bForce = TRUE will free XLators SM buffers/dscriptrs.
+ * force: force = TRUE will free the XLator's SM buffers/descriptors.
* Returns:
* 0: Success.
* -EFAULT: Bad translator handle.
*
*/
extern int cmm_xlator_delete(struct cmm_xlatorobject *xlator,
- bool bForce);
+ bool force);
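/*
 * Illustrative sketch (not part of the original patch): releasing a
 * translator together with its shared-memory buffers/descriptors by passing
 * force = true. "xlator" is a hypothetical handle created elsewhere.
 */
static int example_release_xlator(struct cmm_xlatorobject *xlator)
{
	return cmm_xlator_delete(xlator, true);
}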
/*
* ======== cmm_xlator_free_buf ========
* COD module initialized.
* hmgr is valid.
* nArgc > 0.
- * aArgs != NULL.
- * aArgs[0] != NULL.
+ * args != NULL.
+ * args[0] != NULL.
* pfn_write != NULL.
* Ensures:
*/
extern int cod_load_base(struct cod_manager *cod_mgr_obj,
- u32 nArgc, char *aArgs[],
+ u32 nArgc, char *args[],
cod_writefxn pfn_write, void *pArb,
char *envp[]);
*/
extern int cod_read_section(struct cod_libraryobj *lib,
IN char *pstrSect,
- OUT char *pstrContent, IN u32 cContentSize);
+ OUT char *pstrContent, IN u32 content_size);
#endif /* COD_ */
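/*
 * Illustrative sketch (not part of the original patch): copying a named
 * section into a caller-supplied buffer of content_size bytes. The ".trace"
 * section name is hypothetical.
 */
static int example_read_trace(struct cod_libraryobj *lib, char *buf, u32 buf_len)
{
	char sect_name[] = ".trace";	/* hypothetical section name */

	return cod_read_section(lib, sect_name, buf, buf_len);
}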
* This function enumerates currently visible DSP/BIOS Bridge objects
* and returns the UUID and type of each enumerated object.
* Parameters:
- * cIndex: The object enumeration index.
+ * index: The object enumeration index.
* obj_type: Type of object to enumerate.
* uuid_obj: Pointer to a dsp_uuid object.
* Returns:
* This function can be used in conjunction with dcd_get_object_def to
* retrieve object properties.
*/
-extern int dcd_enumerate_object(IN s32 cIndex,
+extern int dcd_enumerate_object(IN s32 index,
IN enum dsp_dcdobjtype obj_type,
OUT struct dsp_uuid *uuid_obj);
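/*
 * Illustrative sketch (not part of the original patch): enumerating objects
 * of one type. The index starts at zero and is advanced while the call
 * keeps succeeding; each returned UUID could then be handed to
 * dcd_get_object_def(). DSP_DCDNODETYPE and the "0 means success"
 * convention are assumptions, not taken from this patch.
 */
static void example_enum_node_uuids(void)
{
	struct dsp_uuid uuid_obj;
	s32 index = 0;

	while (!dcd_enumerate_object(index, DSP_DCDNODETYPE, &uuid_obj)) {
		/* look up properties for uuid_obj here */
		index++;
	}
}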
/* DCD Generic Object Type */
struct dcd_genericobj {
- union dcdObjUnion {
+ union dcd_obj {
struct dcd_nodeprops node_obj; /* node object. */
/* processor object. */
struct dsp_processorinfo proc_info;
* Purpose:
* Release the communication memory manager resources.
*/
-int cmm_destroy(struct cmm_object *hcmm_mgr, bool bForce)
+int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
{
struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
struct cmm_info temp_info;
}
mutex_lock(&cmm_mgr_obj->cmm_lock);
/* If not force then fail if outstanding allocations exist */
- if (!bForce) {
+ if (!force) {
/* Check for outstanding memory allocations */
status = cmm_get_info(hcmm_mgr, &temp_info);
if (DSP_SUCCEEDED(status)) {
* Free the Xlator resources.
* VM gets freed later.
*/
-int cmm_xlator_delete(struct cmm_xlatorobject *xlator, bool bForce)
+int cmm_xlator_delete(struct cmm_xlatorobject *xlator, bool force)
{
struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
int status = 0;
return filp;
}
-static s32 cod_f_read(void __user *pbuffer, s32 size, s32 cCount,
+static s32 cod_f_read(void __user *pbuffer, s32 size, s32 count,
struct file *filp)
{
/* check for valid file handle */
if (!filp)
return -EFAULT;
- if ((size > 0) && (cCount > 0) && pbuffer) {
+ if ((size > 0) && (count > 0) && pbuffer) {
u32 dw_bytes_read;
mm_segment_t fs;
/* read from file */
fs = get_fs();
set_fs(get_ds());
- dw_bytes_read = filp->f_op->read(filp, pbuffer, size * cCount,
+ dw_bytes_read = filp->f_op->read(filp, pbuffer, size * count,
&(filp->f_pos));
set_fs(fs);
return -EINVAL;
}
-static s32 cod_f_seek(struct file *filp, s32 lOffset, s32 cOrigin)
+static s32 cod_f_seek(struct file *filp, s32 lOffset, s32 origin)
{
loff_t dw_cur_pos;
return -EFAULT;
/* based on the origin flag, move the internal pointer */
- dw_cur_pos = filp->f_op->llseek(filp, lOffset, cOrigin);
+ dw_cur_pos = filp->f_op->llseek(filp, lOffset, origin);
if ((s32) dw_cur_pos < 0)
return -EPERM;
* loaded must be the first element of the args array and must be a fully
* qualified pathname.
* Details:
- * if nArgc doesn't match the number of arguments in the aArgs array, the
- * aArgs array is searched for a NULL terminating entry, and argc is
+ * if nArgc doesn't match the number of arguments in the args array, the
+ * args array is searched for a terminating NULL entry, and argc is
* recalculated to reflect this. In this way, we can support NULL
- * terminating aArgs arrays, if nArgc is very large.
+ * terminated args arrays, even if nArgc is very large.
*/
-int cod_load_base(struct cod_manager *hmgr, u32 nArgc, char *aArgs[],
+int cod_load_base(struct cod_manager *hmgr, u32 nArgc, char *args[],
cod_writefxn pfn_write, void *pArb, char *envp[])
{
dbll_flags flags;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(IS_VALID(hmgr));
DBC_REQUIRE(nArgc > 0);
- DBC_REQUIRE(aArgs != NULL);
- DBC_REQUIRE(aArgs[0] != NULL);
+ DBC_REQUIRE(args != NULL);
+ DBC_REQUIRE(args[0] != NULL);
DBC_REQUIRE(pfn_write != NULL);
DBC_REQUIRE(hmgr->base_lib != NULL);
* reflect true number in NULL terminated argv array.
*/
for (i = 0; i < nArgc; i++) {
- if (aArgs[i] == NULL) {
+ if (args[i] == NULL) {
nArgc = i;
break;
}
* Retrieve the content of a code section given the section name.
*/
int cod_read_section(struct cod_libraryobj *lib, IN char *pstrSect,
- OUT char *pstrContent, IN u32 cContentSize)
+ OUT char *pstrContent, IN u32 content_size)
{
int status = 0;
if (lib != NULL)
status =
lib->cod_mgr->fxns.read_sect_fxn(lib->dbll_lib, pstrSect,
- pstrContent, cContentSize);
+ pstrContent, content_size);
else
status = -ESPIPE;
/* ----------------------------------- Function Prototypes */
static struct map_page *get_region(u32 addr);
-static struct map_page *get_free_region(u32 aSize);
-static struct map_page *get_mapped_region(u32 aAddr);
+static struct map_page *get_free_region(u32 len);
+static struct map_page *get_mapped_region(u32 addrs);
/* ======== dmm_create_tables ========
* Purpose:
* Purpose:
* Returns a region containing the specified memory region
*/
-static struct map_page *get_region(u32 aAddr)
+static struct map_page *get_region(u32 addrs)
{
struct map_page *curr_region = NULL;
u32 i = 0;
if (virtual_mapping_table != NULL) {
/* find page mapped by this address */
- i = DMM_ADDR_TO_INDEX(aAddr);
+ i = DMM_ADDR_TO_INDEX(addrs);
if (i < table_size)
curr_region = virtual_mapping_table + i;
}
* Purpose:
* Returns the requested free region
*/
-static struct map_page *get_free_region(u32 aSize)
+static struct map_page *get_free_region(u32 len)
{
struct map_page *curr_region = NULL;
u32 i = 0;
if (virtual_mapping_table == NULL)
return curr_region;
- if (aSize > free_size) {
+ if (len > free_size) {
/* Find the largest free region
* (coalesce during the traversal) */
while (i < table_size) {
i = next_i;
}
}
- if (aSize <= free_size) {
+ if (len <= free_size) {
curr_region = virtual_mapping_table + free_region;
- free_region += (aSize / PG_SIZE4K);
- free_size -= aSize;
+ free_region += (len / PG_SIZE4K);
+ free_size -= len;
}
return curr_region;
}
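/*
 * Illustrative note (not part of the original patch): get_free_region()
 * carves the request out of the tracked free run. For example, with
 * free_size = 64K and free_region at entry N, a 16K request returns entry N,
 * advances free_region by 4 entries (16K / PG_SIZE4K) and leaves
 * free_size = 48K. The figures are hypothetical.
 */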
* Purpose:
* Returns the requested mapped region
*/
-static struct map_page *get_mapped_region(u32 aAddr)
+static struct map_page *get_mapped_region(u32 addrs)
{
u32 i = 0;
struct map_page *curr_region = NULL;
if (virtual_mapping_table == NULL)
return curr_region;
- i = DMM_ADDR_TO_INDEX(aAddr);
+ i = DMM_ADDR_TO_INDEX(addrs);
if (i < table_size && (virtual_mapping_table[i].mapped ||
virtual_mapping_table[i].reserved))
curr_region = virtual_mapping_table + i;
static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
enum dsp_dcdobjtype obj_type,
struct dcd_genericobj *pGenObj);
-static void compress_buf(char *psz_buf, u32 ul_buf_size, s32 cCharSize);
-static char dsp_char2_gpp_char(char *pWord, s32 cDspCharSize);
+static void compress_buf(char *psz_buf, u32 ul_buf_size, s32 char_size);
+static char dsp_char2_gpp_char(char *pWord, s32 dsp_char_size);
static int get_dep_lib_info(IN struct dcd_manager *hdcd_mgr,
IN struct dsp_uuid *uuid_obj,
IN OUT u16 *pNumLibs,
* Purpose:
* Enumerates objects in the DCD.
*/
-int dcd_enumerate_object(IN s32 cIndex, IN enum dsp_dcdobjtype obj_type,
+int dcd_enumerate_object(IN s32 index, IN enum dsp_dcdobjtype obj_type,
OUT struct dsp_uuid *uuid_obj)
{
int status = 0;
int len;
DBC_REQUIRE(refs >= 0);
- DBC_REQUIRE(cIndex >= 0);
+ DBC_REQUIRE(index >= 0);
DBC_REQUIRE(uuid_obj != NULL);
- if ((cIndex != 0) && (enum_refs == 0)) {
+ if ((index != 0) && (enum_refs == 0)) {
/*
* If an enumeration is being performed on an index greater
* than zero, then the current enum_refs must have been
spin_lock(&dbdcd_lock);
list_for_each_entry(dcd_key, ®_key_list, link) {
if (!strncmp(dcd_key->name, sz_reg_key, len)
- && !cIndex--) {
+ && !index--) {
strncpy(sz_value, &dcd_key->name[len],
strlen(&dcd_key->name[len]) + 1);
break;
* Purpose:
* Compress the DSP buffer, if necessary, to conform to PC format.
*/
-static void compress_buf(char *psz_buf, u32 ul_buf_size, s32 cCharSize)
+static void compress_buf(char *psz_buf, u32 ul_buf_size, s32 char_size)
{
char *p;
char ch;
return;
for (q = psz_buf; q < (psz_buf + ul_buf_size);) {
- ch = dsp_char2_gpp_char(q, cCharSize);
+ ch = dsp_char2_gpp_char(q, char_size);
if (ch == '\\') {
- q += cCharSize;
- ch = dsp_char2_gpp_char(q, cCharSize);
+ q += char_size;
+ ch = dsp_char2_gpp_char(q, char_size);
switch (ch) {
case 't':
*p = '\t';
*p = ch;
}
p++;
- q += cCharSize;
+ q += char_size;
}
/* NULL out remainder of buffer. */
* Purpose:
* Convert DSP char to host GPP char in a portable manner
*/
-static char dsp_char2_gpp_char(char *pWord, s32 cDspCharSize)
+static char dsp_char2_gpp_char(char *pWord, s32 dsp_char_size)
{
char ch = '\0';
char *ch_src;
s32 i;
- for (ch_src = pWord, i = cDspCharSize; i > 0; i--)
+ for (ch_src = pWord, i = dsp_char_size; i > 0; i--)
ch |= *ch_src++;
return ch;
* ======== dsp_deinit ========
* Frees the resources allocated for bridge.
*/
-bool dsp_deinit(u32 deviceContext)
+bool dsp_deinit(u32 device_ctxt)
{
bool ret = true;
u32 device_node;
(void)dev_remove_device((struct cfg_devnode *)device_node);
(void)drv_release_resources((u32) device_node,
- (struct drv_object *)deviceContext);
+ (struct drv_object *)device_ctxt);
}
- (void)drv_destroy((struct drv_object *)deviceContext);
+ (void)drv_destroy((struct drv_object *)device_ctxt);
/* Get the Manager Object from Registry
* MGR Destroy will unload the DCD dll */