#define IPC_SRC_QUEUE_MASK 0x7
#define IPC_SRC_QUEUE(x) (((x) & IPC_SRC_QUEUE_MASK) \
<< IPC_SRC_QUEUE_SHIFT)
+/* Load Module count */
+#define IPC_LOAD_MODULE_SHIFT 0
+#define IPC_LOAD_MODULE_MASK 0xFF
+#define IPC_LOAD_MODULE_CNT(x) (((x) & IPC_LOAD_MODULE_MASK) \
+ << IPC_LOAD_MODULE_SHIFT)
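These helpers pack the module count into the low byte of the primary IPC word. A quick standalone sanity check of the packing (userspace sketch; only the shift/mask values are taken from the macros above):

    #include <stdio.h>
    #include <stdint.h>

    #define IPC_LOAD_MODULE_SHIFT  0
    #define IPC_LOAD_MODULE_MASK   0xFF
    #define IPC_LOAD_MODULE_CNT(x) (((x) & IPC_LOAD_MODULE_MASK) \
                                    << IPC_LOAD_MODULE_SHIFT)

    int main(void)
    {
        uint32_t primary = 0;

        primary |= IPC_LOAD_MODULE_CNT(3);     /* count lands in bits [7:0] */
        printf("primary = 0x%08x\n", primary); /* prints 0x00000003 */
        return 0;
    }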
/* Save pipeline message extension register */
#define IPC_DMA_ID_SHIFT 0
}
EXPORT_SYMBOL_GPL(skl_ipc_bind_unbind);
+/*
+ * In order to load a module we need to send an IPC to initiate that. A DMA
+ * transfer is performed to copy the module memory. The FW supports loading
+ * multiple modules in a single shot, so we can send one IPC for N modules,
+ * with N given by module_cnt.
+ */
+int skl_ipc_load_modules(struct sst_generic_ipc *ipc,
+ u8 module_cnt, void *data)
+{
+ struct skl_ipc_header header = {0};
+ u64 *ipc_header = (u64 *)(&header);
+ int ret;
+
+ header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+ header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+ header.primary |= IPC_GLB_TYPE(IPC_GLB_LOAD_MULTIPLE_MODS);
+ header.primary |= IPC_LOAD_MODULE_CNT(module_cnt);
+
+ ret = sst_ipc_tx_message_wait(ipc, *ipc_header, data,
+ (sizeof(u16) * module_cnt), NULL, 0);
+ if (ret < 0)
+ dev_err(ipc->dev, "ipc: load modules failed :%d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_load_modules);
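For reference, the payload is simply a packed array of u16 module IDs whose length matches module_cnt, which is why the size above is computed as sizeof(u16) * module_cnt. A minimal caller sketch (kernel context assumed; the module IDs and the skl pointer are illustrative):

    u16 mod_ids[2] = { 3, 5 };	/* hypothetical module IDs */
    int ret;

    ret = skl_ipc_load_modules(&skl->ipc, ARRAY_SIZE(mod_ids), mod_ids);
    if (ret < 0)
        pr_err("load modules failed: %d\n", ret);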
+
+int skl_ipc_unload_modules(struct sst_generic_ipc *ipc, u8 module_cnt,
+ void *data)
+{
+ struct skl_ipc_header header = {0};
+ u64 *ipc_header = (u64 *)(&header);
+ int ret;
+
+ header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+ header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+ header.primary |= IPC_GLB_TYPE(IPC_GLB_UNLOAD_MULTIPLE_MODS);
+ header.primary |= IPC_LOAD_MODULE_CNT(module_cnt);
+
+ ret = sst_ipc_tx_message_wait(ipc, *ipc_header, data,
+ (sizeof(u16) * module_cnt), NULL, 0);
+ if (ret < 0)
+ dev_err(ipc->dev, "ipc: unload modules failed :%d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_unload_modules);
+
int skl_ipc_set_large_config(struct sst_generic_ipc *ipc,
struct skl_ipc_large_config_msg *msg, u32 *param)
{
#define SKL_INSTANCE_ID 0
#define SKL_BASE_FW_MODULE_ID 0
+#define SKL_NUM_MODULES 1
+
static bool skl_check_fw_status(struct sst_dsp *ctx, u32 status)
{
u32 cur_sts;
return sst_dsp_shim_read(ctx, SKL_ADSP_ERROR_CODE);
}
+/*
+ * since get/put_module are called from DAPM context,
+ * we don't need a lock for the usage count
+ */
+static int skl_get_module(struct sst_dsp *ctx, u16 mod_id)
+{
+ struct skl_module_table *module;
+
+ list_for_each_entry(module, &ctx->module_list, list) {
+ if (module->mod_info->mod_id == mod_id)
+ return ++module->usage_cnt;
+ }
+
+ return -EINVAL;
+}
+
+static int skl_put_module(struct sst_dsp *ctx, u16 mod_id)
+{
+ struct skl_module_table *module;
+
+ list_for_each_entry(module, &ctx->module_list, list) {
+ if (module->mod_info->mod_id == mod_id)
+ return --module->usage_cnt;
+ }
+
+ return -EINVAL;
+}
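Both walkers return the updated count on a hit and -EINVAL when the ID is unknown, which is why the return type must be signed. A standalone mirror of the pattern (plain array instead of the kernel list; -22 stands in for -EINVAL):

    #include <stdio.h>

    struct mod { unsigned short id; int usage_cnt; };

    static int get_module(struct mod *tbl, int n, unsigned short id)
    {
        for (int i = 0; i < n; i++)
            if (tbl[i].id == id)
                return ++tbl[i].usage_cnt;
        return -22;	/* -EINVAL: no such module */
    }

    int main(void)
    {
        struct mod tbl[] = { { 3, 0 }, { 5, 0 } };

        printf("%d\n", get_module(tbl, 2, 3));	/* 1: first user */
        printf("%d\n", get_module(tbl, 2, 3));	/* 2: second user */
        printf("%d\n", get_module(tbl, 2, 9));	/* -22: unknown ID */
        return 0;
    }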
+
+static struct skl_module_table *skl_fill_module_table(struct sst_dsp *ctx,
+ char *mod_name, int mod_id)
+{
+ const struct firmware *fw;
+ struct skl_module_table *skl_module;
+ unsigned int size;
+ int ret;
+
+ ret = request_firmware(&fw, mod_name, ctx->dev);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Request Module %s failed :%d\n",
+ mod_name, ret);
+ return NULL;
+ }
+
+ skl_module = devm_kzalloc(ctx->dev, sizeof(*skl_module), GFP_KERNEL);
+ if (skl_module == NULL) {
+ release_firmware(fw);
+ return NULL;
+ }
+
+ size = sizeof(*skl_module->mod_info);
+ skl_module->mod_info = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
+ if (skl_module->mod_info == NULL) {
+ release_firmware(fw);
+ return NULL;
+ }
+
+ skl_module->mod_info->mod_id = mod_id;
+ skl_module->mod_info->fw = fw;
+ list_add(&skl_module->list, &ctx->module_list);
+
+ return skl_module;
+}
+
+/* get a module from its unique ID */
+static struct skl_module_table *skl_module_get_from_id(
+ struct sst_dsp *ctx, u16 mod_id)
+{
+ struct skl_module_table *module;
+
+ if (list_empty(&ctx->module_list)) {
+ dev_err(ctx->dev, "Module list is empty\n");
+ return NULL;
+ }
+
+ list_for_each_entry(module, &ctx->module_list, list) {
+ if (module->mod_info->mod_id == mod_id)
+ return module;
+ }
+
+ return NULL;
+}
+
+static int skl_transfer_module(struct sst_dsp *ctx,
+ struct skl_load_module_info *module)
+{
+ int ret;
+ struct skl_sst *skl = ctx->thread_context;
+
+ ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, module->fw->data,
+ module->fw->size);
+ if (ret < 0)
+ return ret;
+
+ ret = skl_ipc_load_modules(&skl->ipc, SKL_NUM_MODULES,
+ (void *)&module->mod_id);
+ if (ret < 0)
+ dev_err(ctx->dev, "Failed to Load module: %d\n", ret);
+
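+ /* stop the code loader DMA whether or not the IPC succeeded */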
+ ctx->cl_dev.ops.cl_stop_dma(ctx);
+
+ return ret;
+}
+
+static int skl_load_module(struct sst_dsp *ctx, u16 mod_id, char *guid)
+{
+ struct skl_module_table *module_entry = NULL;
+ int ret = 0;
+ char mod_name[64]; /* "intel/dsp_fw_" + 36-char guid str + ".bin" + NUL */
+
+ snprintf(mod_name, sizeof(mod_name), "%s%s%s",
+ "intel/dsp_fw_", guid, ".bin");
+
+ module_entry = skl_module_get_from_id(ctx, mod_id);
+ if (module_entry == NULL) {
+ module_entry = skl_fill_module_table(ctx, mod_name, mod_id);
+ if (module_entry == NULL) {
+ dev_err(ctx->dev, "Failed to Load module\n");
+ return -EINVAL;
+ }
+ }
+
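+ /* copy the module to the DSP only on its first use */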
+ if (!module_entry->usage_cnt) {
+ ret = skl_transfer_module(ctx, module_entry->mod_info);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Failed to Load module\n");
+ return ret;
+ }
+ }
+
+ ret = skl_get_module(ctx, mod_id);
+
+ return ret;
+}
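The firmware file name is built from the module GUID string. A standalone check of the construction (the GUID value below is purely illustrative):

    #include <stdio.h>

    int main(void)
    {
        char mod_name[64];
        /* hypothetical GUID: 32 hex chars + 4 hyphens = 36 chars */
        const char *guid = "b26c4cb5-86f0-4554-a32d-7212df72b5b6";

        snprintf(mod_name, sizeof(mod_name), "%s%s%s",
                 "intel/dsp_fw_", guid, ".bin");
        printf("%s\n", mod_name);	/* intel/dsp_fw_<guid>.bin, 53 chars */
        return 0;
    }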
+
+static int skl_unload_module(struct sst_dsp *ctx, u16 mod_id)
+{
+ int usage_cnt;
+ struct skl_sst *skl = ctx->thread_context;
+ int ret = 0;
+
+ usage_cnt = skl_put_module(ctx, mod_id);
+ if (usage_cnt < 0) {
+ dev_err(ctx->dev, "Module bad usage cnt!:%d\n", usage_cnt);
+ return -EIO;
+ }
+ ret = skl_ipc_unload_modules(&skl->ipc,
+ SKL_NUM_MODULES, &mod_id);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Failed to UnLoad module\n");
+ skl_get_module(ctx, mod_id);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void skl_clear_module_table(struct sst_dsp *ctx)
+{
+ struct skl_module_table *module, *tmp;
+
+ if (list_empty(&ctx->module_list))
+ return;
+
+ list_for_each_entry_safe(module, tmp, &ctx->module_list, list) {
+ list_del(&module->list);
+ release_firmware(module->mod_info->fw);
+ }
+}
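Only the firmware blobs need an explicit release here; the table entries and mod_info are devm-allocated and are freed with the device. The _safe iterator matters because list_del() would otherwise invalidate the cursor mid-walk; a standalone mirror of that idiom:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int id; struct node *next; };

    int main(void)
    {
        struct node *head = NULL, *n, *tmp;

        for (int i = 0; i < 3; i++) {	/* build a small list */
            n = malloc(sizeof(*n));
            if (!n)
                return 1;
            n->id = i;
            n->next = head;
            head = n;
        }

        /* "safe" walk: fetch the successor before freeing the current node */
        for (n = head; n; n = tmp) {
            tmp = n->next;
            printf("releasing node %d\n", n->id);
            free(n);
        }
        return 0;
    }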
+
static struct skl_dsp_fw_ops skl_fw_ops = {
.set_state_D0 = skl_set_dsp_D0,
.set_state_D3 = skl_set_dsp_D3,
.load_fw = skl_load_base_firmware,
.get_fw_errcode = skl_get_errorcode,
+ .load_mod = skl_load_module,
+ .unload_mod = skl_unload_module,
};
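The loaders are reached only through this ops table, which is why the topology code below checks the function pointer before dispatching. A standalone sketch of that pattern:

    #include <stdio.h>

    struct fw_ops {
        int (*load_mod)(unsigned short mod_id);
        int (*unload_mod)(unsigned short mod_id);
    };

    static int demo_load(unsigned short id)
    {
        printf("load module %u\n", id);
        return 0;
    }

    static const struct fw_ops ops = {
        .load_mod = demo_load,
        /* .unload_mod left NULL: callers must check before dispatching */
    };

    int main(void)
    {
        if (ops.load_mod)
            ops.load_mod(3);
        if (ops.unload_mod)	/* skipped: pointer is NULL */
            ops.unload_mod(3);
        return 0;
    }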
static struct sst_ops skl_ops = {
sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
+ INIT_LIST_HEAD(&sst->module_list);
sst->dsp_ops = dsp_ops;
sst->fw_ops = skl_fw_ops;
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
+ skl_clear_module_table(ctx->dsp);
skl_ipc_free(&ctx->ipc);
ctx->dsp->ops->free(ctx->dsp);
}
#include "skl-topology.h"
#include "skl.h"
#include "skl-tplg-interface.h"
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
#define SKL_CH_FIXUP_MASK (1 << 0)
#define SKL_RATE_FIXUP_MASK (1 << 1)
if (!skl_tplg_alloc_pipe_mcps(skl, mconfig))
return -ENOMEM;
+ if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
+ ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
+ mconfig->id.module_id, mconfig->guid);
+ if (ret < 0)
+ return ret;
+ }
+
/*
* apply fix/conversion to module params based on
* FE/BE params
return 0;
}
+static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
+ struct skl_pipe *pipe)
+{
+ struct skl_pipe_module *w_module = NULL;
+ struct skl_module_cfg *mconfig = NULL;
+ int ret;
+
+ list_for_each_entry(w_module, &pipe->w_list, node) {
+ mconfig = w_module->w->priv;
+
+ if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod) {
+ ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
+ mconfig->id.module_id);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* all loadable modules in this pipe (if any) were unloaded */
+ return 0;
+}
+
/*
* Mixer module represents a pipeline. So in the Pre-PMU event of mixer we
* need create the pipeline. So we do following:
ret = skl_delete_pipe(ctx, mconfig->pipe);
- return ret;
+ return skl_tplg_unload_pipe_modules(ctx, s_pipe);
}
/*