#define MFC_ENC_OTF_DRM_NAME "s5p-mfc-enc-otf-secure"
struct _mfc_trace g_mfc_trace[MFC_TRACE_COUNT_MAX];
-struct _mfc_trace g_mfc_trace_hwlock[MFC_TRACE_COUNT_MAX];
+struct _mfc_trace g_mfc_trace_longterm[MFC_TRACE_COUNT_MAX];
struct mfc_dev *g_mfc_dev;
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
if (dbg_enable)
mfc_alloc_dbg_info_buffer(dev);
- MFC_TRACE_DEV_HWLOCK("**open\n");
ret = mfc_get_hwlock_dev(dev);
if (ret < 0) {
mfc_err_dev("Failed to get hwlock\n");
ctx->num++;
if (ctx->num >= MFC_NUM_CONTEXTS) {
mfc_err_dev("Too many open contexts\n");
+ mfc_err_dev("Print information to check if there was an error or not\n");
+ call_dop(dev, dump_info_context, dev);
ret = -EBUSY;
goto err_ctx_num;
}
dev->num_drm_inst, dev->num_inst);
} else {
mfc_err_ctx("Too many instance are opened for DRM\n");
+ mfc_err_dev("Print information to check if there was an error or not\n");
+ call_dop(dev, dump_info_context, dev);
ret = -EINVAL;
goto err_drm_start;
}
trace_mfc_node_open(ctx->num, dev->num_inst, ctx->type, ctx->is_drm);
mfc_info_ctx("MFC open completed [%d:%d] dev = 0x%p, ctx = 0x%p, version = %d\n",
dev->num_drm_inst, dev->num_inst, dev, ctx, MFC_DRIVER_INFO);
+ MFC_TRACE_CTX_LT("[INFO] %s %s opened (ctx:%d, total:%d)\n", ctx->is_drm ? "DRM" : "Normal",
+ mfc_is_decoder_node(node) ? "DEC" : "ENC", ctx->num, dev->num_inst);
mutex_unlock(&dev->mfc_mutex);
return ret;
static int __mfc_wait_close_inst(struct mfc_dev *dev, struct mfc_ctx *ctx)
{
+ int ret;
+
if (atomic_read(&dev->watchdog_run)) {
mfc_err_ctx("watchdog already running!\n");
return 0;
/* To issue the command 'CLOSE_INSTANCE' */
if (mfc_just_run(dev, ctx->num)) {
- mfc_err_ctx("Failed to run MFC\n");
+ mfc_err_ctx("failed to run MFC, state: %d\n", ctx->state);
+ MFC_TRACE_CTX_LT("[ERR][Release] failed to run MFC, state: %d\n", ctx->state);
return -EIO;
}
/* Wait until instance is returned or timeout occured */
- if (mfc_wait_for_done_ctx(ctx,
- MFC_REG_R2H_CMD_CLOSE_INSTANCE_RET) == 1) {
- mfc_err_ctx("Waiting for CLOSE_INSTANCE timed out\n");
+ ret = mfc_wait_for_done_ctx(ctx, MFC_REG_R2H_CMD_CLOSE_INSTANCE_RET);
+ if (ret == 1) {
+ mfc_err_ctx("failed to wait CLOSE_INSTANCE(timeout)\n");
+
if (mfc_wait_for_done_ctx(ctx,
MFC_REG_R2H_CMD_CLOSE_INSTANCE_RET)) {
- mfc_err_ctx("waiting once more but timed out\n");
+ mfc_err_ctx("waited once more but failed to wait CLOSE_INSTANCE\n");
dev->logging_data->cause |= (1 << MFC_CAUSE_FAIL_CLOSE_INST);
call_dop(dev, dump_and_stop_always, dev);
}
+ } else if (ret == -1) {
+ mfc_err_ctx("failed to wait CLOSE_INSTANCE(err)\n");
+ call_dop(dev, dump_and_stop_debug_mode, dev);
}
ctx->inst_no = MFC_NO_INSTANCE_SET;
mfc_info_ctx("MFC driver release is called [%d:%d], is_drm(%d)\n",
dev->num_drm_inst, dev->num_inst, ctx->is_drm);
+ MFC_TRACE_CTX_LT("[INFO] release is called (ctx:%d, total:%d)\n", ctx->num, dev->num_inst);
+
mfc_clear_bit(ctx->num, &dev->work_bits);
/* If a H/W operation is in progress, wait for it complete */
mfc_cleanup_work_bit_and_try_run(ctx);
}
}
- MFC_TRACE_CTX_HWLOCK("**release\n");
+
ret = mfc_get_hwlock_ctx(ctx);
if (ret < 0) {
mfc_err_dev("Failed to get hwlock\n");
+ MFC_TRACE_CTX_LT("[ERR][Release] failed to get hwlock (shutdown: %d)\n", dev->shutdown);
mutex_unlock(&dev->mfc_mutex);
return -EBUSY;
}
trace_mfc_node_close(ctx->num, dev->num_inst, ctx->type, ctx->is_drm);
+ MFC_TRACE_CTX_LT("[INFO] Release finished (ctx:%d, total:%d)\n", ctx->num, dev->num_inst);
+
dev->ctx[ctx->num] = 0;
kfree(ctx);
__mfc_parse_dt(dev->device->of_node, dev);
atomic_set(&dev->trace_ref, 0);
- atomic_set(&dev->trace_ref_hwlock, 0);
+ atomic_set(&dev->trace_ref_longterm, 0);
dev->mfc_trace = g_mfc_trace;
- dev->mfc_trace_hwlock = g_mfc_trace_hwlock;
+ dev->mfc_trace_longterm = g_mfc_trace_longterm;
dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
int ret;
mfc_info_dev("MFC shutdown is called\n");
- MFC_TRACE_DEV_HWLOCK("**shutdown \n");
if (!mfc_pm_get_pwr_ref_cnt(dev)) {
dev->shutdown = 1;
if (dev->num_inst == 0)
return 0;
- MFC_TRACE_DEV_HWLOCK("**sleep\n");
ret = mfc_get_hwlock_dev(dev);
if (ret < 0) {
mfc_err_dev("Failed to get hwlock\n");
if (dev->num_inst == 0)
return 0;
- MFC_TRACE_DEV_HWLOCK("**wakeup\n");
ret = mfc_get_hwlock_dev(dev);
if (ret < 0) {
mfc_err_dev("Failed to get hwlock\n");
void (*dump_regs)(struct mfc_dev *dev);
void (*dump_info)(struct mfc_dev *dev);
void (*dump_info_without_regs)(struct mfc_dev *dev);
+ void (*dump_info_context)(struct mfc_dev *dev);
void (*dump_and_stop_always)(struct mfc_dev *dev);
void (*dump_and_stop_debug_mode)(struct mfc_dev *dev);
};
atomic_t trace_ref;
struct _mfc_trace *mfc_trace;
- atomic_t trace_ref_hwlock;
- struct _mfc_trace *mfc_trace_hwlock;
+ atomic_t trace_ref_longterm;
+
+ struct _mfc_trace *mfc_trace_longterm;
bool continue_clock_on;
bool shutdown;
/* If there is no ctx structure */
-#define MFC_TRACE_DEV_HWLOCK(fmt, args...) \
+/*
+ * Append a device-scoped entry to the long-term trace ring buffer.
+ * Requires a 'dev' (struct mfc_dev *) in the caller's scope. The index
+ * mask relies on MFC_TRACE_COUNT_MAX being a power of two.
+ */
+#define MFC_TRACE_DEV_LT(fmt, args...) \
do { \
	int cpu = raw_smp_processor_id(); \
	int cnt; \
-	cnt = atomic_inc_return(&dev->trace_ref_hwlock) & (MFC_TRACE_COUNT_MAX - 1); \
-	dev->mfc_trace_hwlock[cnt].time = cpu_clock(cpu); \
-	snprintf(dev->mfc_trace_hwlock[cnt].str, MFC_TRACE_STR_LEN, \
+	cnt = atomic_inc_return(&dev->trace_ref_longterm) & (MFC_TRACE_COUNT_MAX - 1); \
+	dev->mfc_trace_longterm[cnt].time = cpu_clock(cpu); \
+	snprintf(dev->mfc_trace_longterm[cnt].str, MFC_TRACE_STR_LEN, \
		fmt, ##args); \
} while (0)
/* If there is ctx structure */
-#define MFC_TRACE_CTX_HWLOCK(fmt, args...) \
+/*
+ * Append a context-scoped entry to the long-term trace ring buffer,
+ * prefixed with the context number. Requires 'dev' and 'ctx' in the
+ * caller's scope; shares the ring and counter with MFC_TRACE_DEV_LT().
+ */
+#define MFC_TRACE_CTX_LT(fmt, args...) \
do { \
	int cpu = raw_smp_processor_id(); \
	int cnt; \
-	cnt = atomic_inc_return(&dev->trace_ref_hwlock) & (MFC_TRACE_COUNT_MAX - 1); \
-	dev->mfc_trace_hwlock[cnt].time = cpu_clock(cpu); \
-	snprintf(dev->mfc_trace_hwlock[cnt].str, MFC_TRACE_STR_LEN, \
+	cnt = atomic_inc_return(&dev->trace_ref_longterm) & (MFC_TRACE_COUNT_MAX - 1); \
+	dev->mfc_trace_longterm[cnt].time = cpu_clock(cpu); \
+	snprintf(dev->mfc_trace_longterm[cnt].str, MFC_TRACE_STR_LEN, \
		"[c:%d] " fmt, ctx->num, ##args); \
} while (0)
mfc_debug(2, "[STREAM] sizeimage: %d\n", pix_fmt_mp->plane_fmt[0].sizeimage);
pix_fmt_mp->plane_fmt[0].bytesperline = 0;
- MFC_TRACE_CTX_HWLOCK("**DEC s_fmt\n");
ret = mfc_get_hwlock_ctx(ctx);
if (ret < 0) {
mfc_err_ctx("Failed to get hwlock\n");
test_bit(ctx->num, &dev->hwlock.bits), q->type);
MFC_TRACE_CTX("** DEC streamoff(type:%d)\n", q->type);
- MFC_TRACE_CTX_HWLOCK("**DEC streamoff(type:%d)\n", q->type);
/* If a H/W operation is in progress, wait for it complete */
ret = mfc_get_hwlock_ctx(ctx);
if (ret < 0) {
return -ENOMEM;
}
- MFC_TRACE_CTX_HWLOCK("**ENC s_fmt\n");
-
ret = mfc_get_hwlock_ctx(ctx);
if (ret < 0) {
mfc_err_dev("Failed to get hwlock\n");
}
aborted = 1;
}
- MFC_TRACE_CTX_HWLOCK("**ENC streamoff(type:%d)\n", q->type);
+
ret = mfc_get_hwlock_ctx(ctx);
if (ret < 0) {
mfc_err_ctx("Failed to get hwlock\n");
((dev->hwlock.transfer_owner == 1) && (dev->hwlock.dev == 1)),
msecs_to_jiffies(MFC_HWLOCK_TIMEOUT));
- MFC_TRACE_DEV_HWLOCK("get_hwlock_dev: before waiting\n");
- MFC_TRACE_DEV_HWLOCK(">>dev:0x%lx, bits:0x%lx, owned:%d, wl:%d, trans:%d\n",
- dev->hwlock.dev, dev->hwlock.bits, dev->hwlock.owned_by_irq,
- dev->hwlock.wl_count, dev->hwlock.transfer_owner);
-
dev->hwlock.transfer_owner = 0;
__mfc_remove_listable_wq_dev(dev);
if (ret == 0) {
dev->hwlock.dev = 1;
dev->hwlock.owned_by_irq = 0;
- MFC_TRACE_DEV_HWLOCK("get_hwlock_dev: no waiting\n");
- MFC_TRACE_DEV_HWLOCK(">>dev:0x%lx, bits:0x%lx, owned:%d, wl:%d, trans:%d\n",
- dev->hwlock.dev, dev->hwlock.bits, dev->hwlock.owned_by_irq,
- dev->hwlock.wl_count, dev->hwlock.transfer_owner);
-
__mfc_print_hwlock(dev);
spin_unlock_irqrestore(&dev->hwlock.lock, flags);
mutex_unlock(&dev->hwlock_wq.wait_mutex);
*/
int mfc_get_hwlock_ctx(struct mfc_ctx *curr_ctx)
{
- struct mfc_ctx *ctx = curr_ctx;
struct mfc_dev *dev = curr_ctx->dev;
int ret = 0;
unsigned long flags;
spin_unlock_irqrestore(&dev->hwlock.lock, flags);
- MFC_TRACE_CTX_HWLOCK("get_hwlock_ctx: before waiting\n");
- MFC_TRACE_CTX_HWLOCK(">>dev:0x%lx, bits:0x%lx, owned:%d, wl:%d, trans:%d\n",
- dev->hwlock.dev, dev->hwlock.bits, dev->hwlock.owned_by_irq,
- dev->hwlock.wl_count, dev->hwlock.transfer_owner);
-
mfc_debug(2, "Waiting for hwlock to be released\n");
ret = wait_event_timeout(curr_ctx->hwlock_wq.wait_queue,
((dev->hwlock.transfer_owner == 1) && (test_bit(curr_ctx->num, &dev->hwlock.bits))),
msecs_to_jiffies(MFC_HWLOCK_TIMEOUT));
- MFC_TRACE_CTX_HWLOCK("get_hwlock_ctx: after waiting, ret:%d\n", ret);
- MFC_TRACE_CTX_HWLOCK(">>dev:0x%lx, bits:0x%lx, owned:%d, wl:%d, trans:%d\n",
- dev->hwlock.dev, dev->hwlock.bits, dev->hwlock.owned_by_irq,
- dev->hwlock.wl_count, dev->hwlock.transfer_owner);
-
dev->hwlock.transfer_owner = 0;
__mfc_remove_listable_wq_ctx(curr_ctx);
if (ret == 0) {
set_bit(curr_ctx->num, &dev->hwlock.bits);
dev->hwlock.owned_by_irq = 0;
- MFC_TRACE_CTX_HWLOCK("get_hwlock_ctx: no waiting\n");
- MFC_TRACE_CTX_HWLOCK(">>dev:0x%lx, bits:0x%lx, owned:%d, wl:%d, trans:%d\n",
- dev->hwlock.dev, dev->hwlock.bits, dev->hwlock.owned_by_irq,
- dev->hwlock.wl_count, dev->hwlock.transfer_owner);
-
__mfc_print_hwlock(dev);
spin_unlock_irqrestore(&dev->hwlock.lock, flags);
mutex_unlock(&curr_ctx->hwlock_wq.wait_mutex);
dev->hwlock.transfer_owner = 1;
- MFC_TRACE_DEV_HWLOCK("release_hwlock_dev: wakeup\n");
- MFC_TRACE_DEV_HWLOCK(">>dev:0x%lx, bits:0x%lx, owned:%d, wl:%d, trans:%d\n",
- dev->hwlock.dev, dev->hwlock.bits, dev->hwlock.owned_by_irq,
- dev->hwlock.wl_count, dev->hwlock.transfer_owner);
-
wake_up(&listable_wq->wait_queue);
}
static void __mfc_release_hwlock_ctx_protected(struct mfc_ctx *curr_ctx)
{
struct mfc_dev *dev = curr_ctx->dev;
- struct mfc_ctx *ctx = curr_ctx;
struct mfc_listable_wq *listable_wq;
__mfc_print_hwlock(dev);
dev->hwlock.transfer_owner = 1;
- MFC_TRACE_CTX_HWLOCK("release_hwlock_ctx: wakeup\n");
- MFC_TRACE_CTX_HWLOCK(">>dev:0x%lx, bits:0x%lx, owned:%d, wl:%d, trans:%d\n",
- dev->hwlock.dev, dev->hwlock.bits, dev->hwlock.owned_by_irq,
- dev->hwlock.wl_count, dev->hwlock.transfer_owner);
-
wake_up(&listable_wq->wait_queue);
}
}
}
+/*
+ * Dump the most recent MFC_TRACE_COUNT_PRINT entries of the long-term
+ * trace ring buffer (open/release and error events) to the kernel log,
+ * oldest first. Each trace string is written by MFC_TRACE_*_LT() and
+ * presumably already ends with '\n' — hence no newline in the format.
+ */
+static void __mfc_dump_trace_longterm(struct mfc_dev *dev)
+{
+	int i, cnt, trace_cnt;
+
+	pr_err("-----------dumping MFC trace long-term info-----------\n");
+
+	/* trace_ref_longterm holds the (unmasked) index of the newest entry */
+	trace_cnt = atomic_read(&dev->trace_ref_longterm);
+	for (i = MFC_TRACE_COUNT_PRINT - 1; i >= 0; i--) {
+		/*
+		 * Mask exactly like the MFC_TRACE_*_LT() writers do: '&'
+		 * stays in range even after the int counter wraps negative
+		 * (2^31 events), whereas '%' on a negative value would
+		 * produce a negative index and read out of bounds.
+		 */
+		cnt = (trace_cnt - i) & (MFC_TRACE_COUNT_MAX - 1);
+		pr_err("MFC trace longterm[%d]: time=%llu, str=%s", cnt,
+			dev->mfc_trace_longterm[cnt].time, dev->mfc_trace_longterm[cnt].str);
+	}
+}
+
void __mfc_dump_buffer_info(struct mfc_dev *dev)
{
struct mfc_ctx *ctx;
}
}
+/*
+ * Lightweight dump used on context-open failures (e.g. "Too many open
+ * contexts"): prints driver state and the long-term trace only — no
+ * register access, so it is safe when no hwlock/power is held.
+ * NOTE(review): "no register access" inferred from the two calls below;
+ * confirm __mfc_dump_state() does not touch MFC registers.
+ */
+static void __mfc_dump_info_context(struct mfc_dev *dev)
+{
+	__mfc_dump_state(dev);
+	__mfc_dump_trace_longterm(dev);
+}
+
static void __mfc_dump_info_without_regs(struct mfc_dev *dev)
{
__mfc_dump_state(dev);
.dump_regs = __mfc_dump_regs,
.dump_info = __mfc_dump_info,
.dump_info_without_regs = __mfc_dump_info_without_regs,
+ .dump_info_context = __mfc_dump_info_context,
.dump_and_stop_always = __mfc_dump_info_and_stop_hw,
.dump_and_stop_debug_mode = __mfc_dump_info_and_stop_hw_debug,
};