enum { CHUB_ON, CHUB_OFF };
enum { C2A_ON, C2A_OFF };
+/*
+ * contexthub_get_token - check whether @acc access to CHUB is allowed now.
+ *
+ * HW_ACCESS is allowed unless the PMU shutdown sequence is in progress;
+ * IPC_ACCESS additionally requires a running CHUB that is not mid-reset.
+ * Returns non-zero when access is allowed, 0 when it must be denied.
+ */
+int contexthub_get_token(struct contexthub_ipc_info *ipc, enum access_type acc)
+{
+	if (acc == HW_ACCESS)
+		return !atomic_read(&ipc->in_pmu_shutdown);
+
+	if (acc == IPC_ACCESS)
+		return (atomic_read(&ipc->chub_status) == CHUB_ST_RUN) &&
+		       !atomic_read(&ipc->in_reset);
+
+	/*
+	 * Unknown access type: deny. Callers boolean-test the result
+	 * (!contexthub_get_token(...)), so returning -EINVAL here would
+	 * be truthy and wrongly read as "access granted".
+	 */
+	return 0;
+}
+
/* host interface functions */
int contexthub_is_run(struct contexthub_ipc_info *ipc)
{
#endif
}
-static inline int contexthub_is_active(struct contexthub_ipc_info *ipc)
+/* request contexthub to host driver */
+int contexthub_request(struct contexthub_ipc_info *ipc, enum access_type acc)
{
- int status = atomic_read(&ipc->chub_status);
-
- if (status == CHUB_ST_RUN)
- return 0;
- else {
- dev_warn(ipc->dev, "%s: isn't active: status:%d\n",
- __func__, status);
+ if (!contexthub_get_token(ipc, acc)) {
+ dev_info(ipc->dev, "%s: %s isn't accesable\n",
+ __func__, (acc == HW_ACCESS) ? "hw" : "ipc");
return -EINVAL;
}
-}
-/* request contexthub to host driver */
-int contexthub_request(struct contexthub_ipc_info *ipc)
-{
if (!ipc->powermode)
return 0;
atomic_dec(&ipc->read_lock.cnt);
}
-static void enable_debug_workqueue(struct contexthub_ipc_info *ipc, enum chub_err_type err)
+/* simple alive check function : don't use ipc map */
+static bool contexthub_lowlevel_alive(struct contexthub_ipc_info *ipc)
+{
+	/* clear the flag, ping CHUB, then wait for the alive ISR to set it */
+	ipc->chub_alive_lock.flag = 0;
+	ipc_hw_gen_interrupt(AP, IRQ_EVT_CHUB_ALIVE);
+	/* timeout is reflected by the flag staying 0, so the
+	 * wait_event_timeout() return value does not need to be kept */
+	wait_event_timeout(ipc->chub_alive_lock.event,
+			   ipc->chub_alive_lock.flag,
+			   msecs_to_jiffies(WAIT_TIMEOUT_MS));
+
+	return ipc->chub_alive_lock.flag;
+}
+
+/* handle errors of chub driver and fw: dump state and reset CHUB when it
+ * is in error state or no longer answers the low-level alive ping. */
+static void handle_debug_work(struct contexthub_ipc_info *ipc, enum chub_err_type err)
+{
+	int need_reset = 0;	/* must be initialized: only set on error below */
+	int alive = contexthub_lowlevel_alive(ipc);
+
+	/* handle fw dbg */
+	if (err == CHUB_ERR_NANOHUB) {
+		enum ipc_debug_event event = ipc_read_debug_event(AP);
+
+		dev_info(ipc->dev, "%s: fw dbg event:%d\n", __func__, event);
+		switch (event) {
+		case IPC_DEBUG_CHUB_PRINT_LOG:
+			log_flush(ipc->fw_log);
+			break;
+		default:
+			dev_warn(ipc->dev, "Contexthub request invalid event:%d\n", event);
+			break;
+		}
-		dev_info(ipc->dev, "%s: err:%d(cnt:%d), active:0x%x\n",
-			 __func__, err, ipc->err_cnt[err], ipc->active_err);
+		ipc_write_debug_event(AP, 0);
+		return;
 	}
-	dev_info(ipc->dev, "%s: dbg:%d\n", __func__, err);
-	if (err == CHUB_ERR_ITMON) {
-		chub_dbg_dump_gpr(ipc);
-		chub_dbg_dump_ram(ipc, err);
-	} else {
+	/* handle err */
+	ipc->err_cnt[err]++;
+
+	/* chub_status/in_reset are atomic_t: read with atomic_read(),
+	 * not __raw_readl() (they are not iomem) */
+	dev_info(ipc->dev, "%s: err:%d, alive:%d, status:%d, in-reset:%d\n",
+		 __func__, err, alive, atomic_read(&ipc->chub_status),
+		 atomic_read(&ipc->in_reset));
+
+	if ((atomic_read(&ipc->chub_status) == CHUB_ST_ERR) || !alive)
+		need_reset = 1;
+
+	/* dump hw & sram into file */
+	chub_dbg_dump_hw(ipc, err);
+
+	/* reset */
+	if (need_reset) {
+#ifdef CHUB_RESET_ENABLE
+		int ret = contexthub_reset(ipc, 0);
+
+		if (ret)
+			dev_warn(ipc->dev, "%s: fails to reset:%d. status:%d\n",
+				 __func__, ret, atomic_read(&ipc->chub_status));
+		else {
+			/* TODO: recovery */
+			dev_info(ipc->dev, "%s: chub reset! should be recovery\n",
+				 __func__);
+			/* re-arm the wdt irq disabled by the wdt ISR */
+			if (err == CHUB_ERR_FW_WDT && ipc->irq_wdt)
+				enable_irq(ipc->irq_wdt);
+		}
+#else
+		atomic_set(&ipc->chub_status, CHUB_ST_HANG);
+#endif
+	}
+}
+
+#define CHUB_RESET_THOLD (5)
+/*
+ * Classify an incoming error, escalate fatal or repeated errors to
+ * CHUB_ST_ERR, then run the debug handler either inline (enable_wq == 0,
+ * e.g. from process context) or via the debug workqueue (enable_wq == 1,
+ * e.g. from irq context).
+ */
+static void request_debug_work(struct contexthub_ipc_info *ipc,
+ enum chub_err_type err, bool enable_wq)
+{
+ dev_info(ipc->dev, "%s: err:%d(cnt:%d), enable_wq:%d\n",
+ __func__, err, ipc->err_cnt[err], enable_wq);
+
+ /* get fw err */
+ /* a generic CHUB_ERR_NANOHUB is refined using the fw-reported debug
+ * event (fault/assert/error), then the event register is cleared */
+ if (err == CHUB_ERR_NANOHUB) {
+ enum ipc_debug_event fw_evt = ipc_read_debug_event(AP);
+ if (fw_evt == IPC_DEBUG_CHUB_FAULT) {
+ err = CHUB_ERR_FW_FAULT;
+ ipc_write_debug_event(AP, 0);
+ }
+ else if ((fw_evt == IPC_DEBUG_CHUB_ASSERT) || (fw_evt == IPC_DEBUG_CHUB_ERROR)) {
+ err = CHUB_ERR_FW_ERROR;
+ ipc_write_debug_event(AP, 0);
+ }
+ }
+
+ /* set status in CHUB_ST_ERR */
+ if ((err == CHUB_ERR_ITMON) || (err == CHUB_ERR_FW_WDT) || (err == CHUB_ERR_FW_FAULT))
+ atomic_set(&ipc->chub_status, CHUB_ST_ERR);
+
+ /* non-fatal errors (below CHUB_ERR_NEED_RESET) only escalate to
+ * CHUB_ST_ERR after occurring more than CHUB_RESET_THOLD times */
+ if (err < CHUB_ERR_NEED_RESET)
+ if (ipc->err_cnt[err] > CHUB_RESET_THOLD) {
+ atomic_set(&ipc->chub_status, CHUB_ST_ERR);
+ ipc->err_cnt[err] = 0;
+ }
+
+ /* handle err */
+ if (enable_wq) {
+ /* latch the error bit; handle_debug_work_func drains cur_err */
+ ipc->cur_err |= (1 << err);
 schedule_work(&ipc->debug_work);
+ } else {
+ handle_debug_work(ipc, err);
}
}
+static DEFINE_MUTEX(dbg_mutex);
+
+/* debug workqueue handler: drain every error bit latched in ipc->cur_err
+ * by request_debug_work(), serialized against itself via dbg_mutex. */
+static void handle_debug_work_func(struct work_struct *work)
+{
+	struct contexthub_ipc_info *ipc =
+		container_of(work, struct contexthub_ipc_info, debug_work);
+	int i;
+
+	mutex_lock(&dbg_mutex);
+	dev_info(ipc->dev, "%s: cur_err:0x%x\n", __func__, ipc->cur_err);
+	for (i = 0; i < CHUB_ERR_MAX; i++) {
+		if (ipc->cur_err & (1 << i)) {
+			dev_info(ipc->dev, "%s: loop: err:%d, cur_err:0x%x\n", __func__, i, ipc->cur_err);
+			handle_debug_work(ipc, i);
+			ipc->cur_err &= ~(1 << i);
+		}
+	}
+	mutex_unlock(&dbg_mutex);
+}
+
int contexthub_ipc_read(struct contexthub_ipc_info *ipc, uint8_t *rx, int max_length,
int timeout)
{
unsigned long flag;
-#ifdef USE_IPC_BUF
int size = 0;
int ret;
int lock;
- struct ipc_buf *ipc_buf;
+ void *rxbuf;
- if (contexthub_is_active(ipc))
+ if (!contexthub_get_token(ipc, IPC_ACCESS)) {
+ dev_warn(ipc->dev, "no-active: read fails\n");
return 0;
+ }
- ipc_buf = ipc_get_base(IPC_REG_IPC_C2A);
if (!ipc->read_lock.flag) {
spin_lock_irqsave(&ipc->read_lock.event.lock, flag);
read_get_locked(ipc);
}
ipc->read_lock.flag--;
- size = ipc_read_data(IPC_DATA_C2A, ipc->rxbuf);
- if (size)
- return contexthub_read_process(rx, ipc->rxbuf, size);
-#else
- struct ipc_content *content;
- int ch = INVAL_CHANNEL;
-
- if (contexthub_is_active(ipc))
- return 0;
-
- if (ipc->read_lock.flag) {
-search_channel:
- ch = get_recv_channel(&ipc->recv_order);
-
- if (ch == INVAL_CHANNEL)
- goto fail_get_channel;
- else
- ipc->read_lock.flag &= ~(1 << ch);
- } else {
- spin_lock_irqsave(&ipc->read_lock.event.lock, flag);
- read_get_locked(ipc);
- ret =
- wait_event_interruptible_timeout_locked(ipc->read_lock.event,
- ipc->read_lock.flag,
- msecs_to_jiffies(timeout));
- read_put_unlocked(ipc);
- spin_unlock_irqrestore(&ipc->read_lock.event.lock, flag);
- if (ret < 0)
- dev_warn(ipc->dev,
- "fails to get read ret:%d timeout:%d, flag:0x%x",
- ret, timeout, ipc->read_lock.flag);
- if (ipc->read_lock.flag)
- goto search_channel;
- else
- goto fail_get_channel;
- }
-
- content = ipc_get_addr(IPC_REG_IPC_C2A, ch);
- ipc->recv_order.container[ch] = 0;
- ipc_update_channel_status(content, CS_CHUB_OWN);
-
- return contexthub_read_process(rx, content->buf, content->size);
+#ifdef USE_IPC_BUF
+ rxbuf = ipc->rxbuf;
+ size = ipc_read_data(IPC_DATA_C2A, rxbuf);
+#else
+ rxbuf = ipc_read_data(IPC_DATA_C2A, &size);
#endif
+ if (size > 0)
+ return contexthub_read_process(rx, rxbuf, size);
+
fail_get_channel:
- enable_debug_workqueue(ipc, CHUB_ERR_READ_FAIL);
+ request_debug_work(ipc, CHUB_ERR_READ_FAIL, 0);
return -EINVAL;
}
int contexthub_ipc_write(struct contexthub_ipc_info *ipc,
uint8_t *tx, int length, int timeout)
{
-#ifdef USE_IPC_BUF
int ret;
- if (contexthub_is_active(ipc))
+ if (!contexthub_get_token(ipc, IPC_ACCESS)) {
+ dev_warn(ipc->dev, "no-active: write fails\n");
return 0;
+ }
ret = ipc_write_data(IPC_DATA_A2C, tx, (u16)length);
if (ret) {
pr_err("%s: fails to write data: ret:%d, len:%d errcnt:%d\n",
__func__, ret, length, ipc->err_cnt[CHUB_ERR_WRITE_FAIL]);
- enable_debug_workqueue(ipc, CHUB_ERR_WRITE_FAIL);
+ request_debug_work(ipc, CHUB_ERR_WRITE_FAIL, 0);
length = 0;
}
return length;
-#else
- struct ipc_content *content;
-
- if (contexthub_is_active(ipc))
- return 0;
-
- content =
- ipc_get_channel(IPC_REG_IPC_A2C, CS_IDLE, CS_AP_WRITE);
-
- if (!content) {
- pr_err("%s: fails to get channel.\n", __func__);
- ipc_print_channel();
-
- return -EINVAL;
- }
- content->size = length;
- memcpy_toio(content->buf, tx, length);
-
- DEBUG_PRINT(KERN_DEBUG, "->W%d\n", content->num);
- if (ipc_add_evt(IPC_EVT_A2C, content->num)) {
- contexthub_ipc_write_event(ipc, MAILBOX_EVT_CHUB_ALIVE);
- length = 0;
- }
-#endif
- return length;
}
static void check_rtc_time(void)
ap_t = rtc_tm_to_time64(&ap_tm);
}
-/* simple alive check function : don't use ipc map */
-static bool contexthub_lowlevel_alive(struct contexthub_ipc_info *ipc)
-{
- int val;
-
- ipc->chub_alive_lock.flag = 0;
- ipc_hw_gen_interrupt(AP, IRQ_EVT_CHUB_ALIVE);
- val = wait_event_timeout(ipc->chub_alive_lock.event,
- ipc->chub_alive_lock.flag,
- msecs_to_jiffies(WAIT_TIMEOUT_MS));
-
- return ipc->chub_alive_lock.flag;
-}
-
-static int contexthub_wait_alive(struct contexthub_ipc_info *ipc)
-{
- int trycnt = 0;
-
- do {
- msleep(WAIT_TIMEOUT_MS);
- contexthub_ipc_write_event(ipc, MAILBOX_EVT_CHUB_ALIVE);
- if (++trycnt > WAIT_TRY_CNT)
- break;
- } while ((atomic_read(&ipc->chub_status) != CHUB_ST_RUN));
-
- if (atomic_read(&ipc->chub_status) == CHUB_ST_RUN) {
- return 0;
- } else {
- dev_warn(ipc->dev, "%s fails. contexthub status is %d\n",
- __func__, atomic_read(&ipc->chub_status));
- return -ETIMEDOUT;
- }
-}
-
static int contexthub_hw_reset(struct contexthub_ipc_info *ipc,
enum mailbox_event event)
{
int ret = 0;
int i;
- /* clear ipc value */
- ipc_init();
+ dev_info(ipc->dev, "%s. status:%d\n",
+ __func__, __raw_readl(&ipc->chub_status));
+ /* clear ipc value */
atomic_set(&ipc->wakeup_chub, CHUB_OFF);
atomic_set(&ipc->irq1_apInt, C2A_OFF);
atomic_set(&ipc->read_lock.cnt, 0x0);
ipc->err_cnt[i] = 0;
ipc->read_lock.flag = 0;
-#ifndef USE_IPC_BUF
- ipc->recv_order.order = 0;
- for (val = 0; val < IRQ_EVT_CH_MAX; val++)
- ipc->recv_order.container[val] = 0;
-#endif
ipc_hw_write_shared_reg(AP, ipc->os_load, SR_BOOT_MODE);
ipc_set_chub_clk((u32)ipc->clkrate);
ipc_set_chub_bootmode(BOOTMODE_COLD);
break;
}
- msleep(WAIT_RESET_MS);
if (ret)
return ret;
- else
- return contexthub_wait_alive(ipc);
+ else {
+ /* wait active */
+ trycnt = 0;
+ do {
+ msleep(WAIT_RESET_MS);
+ contexthub_ipc_write_event(ipc, MAILBOX_EVT_CHUB_ALIVE);
+ if (++trycnt > WAIT_TRY_CNT)
+ break;
+ } while ((atomic_read(&ipc->chub_status) != CHUB_ST_RUN));
+
+ if (atomic_read(&ipc->chub_status) == CHUB_ST_RUN) {
+ dev_info(ipc->dev, "%s done. contexthub status is %d\n",
+ __func__, atomic_read(&ipc->chub_status));
+ return 0;
+ } else {
+ dev_warn(ipc->dev, "%s fails. contexthub status is %d\n",
+ __func__, atomic_read(&ipc->chub_status));
+ return -ETIMEDOUT;
+ }
+ }
}
static void contexthub_config_init(struct contexthub_ipc_info *chub)
{
u32 val;
int ret = 0;
+ int need_ipc = 0;
switch (event) {
case MAILBOX_EVT_INIT_IPC:
ret = contexthub_ipc_drv_init(ipc);
break;
- case MAILBOX_EVT_ENABLE_IRQ:
- /* if enable, mask from CHUB IRQ, else, unmask from CHUB IRQ */
- ipc_hw_unmask_irq(AP, IRQ_EVT_C2A_INT);
- ipc_hw_unmask_irq(AP, IRQ_EVT_C2A_INTCLR);
- break;
- case MAILBOX_EVT_DISABLE_IRQ:
- ipc_hw_mask_irq(AP, IRQ_EVT_C2A_INT);
- ipc_hw_mask_irq(AP, IRQ_EVT_C2A_INTCLR);
- break;
- case MAILBOX_EVT_ERASE_SHARED:
- memset(ipc_get_base(IPC_REG_SHARED), 0, ipc_get_offset(IPC_REG_SHARED));
- break;
- case MAILBOX_EVT_DUMP_STATUS:
- break;
- case MAILBOX_EVT_WAKEUP_CLR:
- if (atomic_read(&ipc->wakeup_chub) == CHUB_ON) {
- atomic_set(&ipc->wakeup_chub, CHUB_OFF);
- ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_WAKEUP_CLR);
- }
- break;
- case MAILBOX_EVT_WAKEUP:
- if (atomic_read(&ipc->wakeup_chub) == CHUB_OFF) {
- atomic_set(&ipc->wakeup_chub, CHUB_ON);
- ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_WAKEUP);
- }
- break;
case MAILBOX_EVT_POWER_ON:
ret = contexthub_hw_reset(ipc, event);
if (!ret)
pr_err("%s: reset release cfg fail\n", __func__);
return ret;
}
-
} else {
- /* core reset */
- ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_SHUTDOWN);
- msleep(100); /* wait for shut down time */
val = __raw_readl(ipc->pmu_chub_reset +
REG_CHUB_CPU_STATUS);
if (val & (1 << REG_CHUB_CPU_STATUS_BIT_STANDBYWFI)) {
val = __raw_readl(ipc->pmu_chub_reset +
REG_CHUB_RESET_CHUB_CONFIGURATION);
__raw_writel(val & ~(1 << 0),
- ipc->pmu_chub_reset +
- REG_CHUB_RESET_CHUB_CONFIGURATION);
+ ipc->pmu_chub_reset +
+ REG_CHUB_RESET_CHUB_CONFIGURATION);
} else {
dev_err(ipc->dev,
"fails to shutdown contexthub. cpu_status: 0x%x\n",
dev_err(ipc->dev,
"chub isn't alive, should be reset. status:%d\n",
atomic_read(&ipc->chub_status));
- if (atomic_read(&ipc->chub_status) == CHUB_ST_RUN ||
- atomic_read(&ipc->chub_status) == CHUB_ST_NO_RESPONSE) {
- atomic_set(&ipc->chub_status, CHUB_ST_NO_RESPONSE);
- enable_debug_workqueue(ipc, CHUB_ERR_CHUB_NO_RESPONSE);
- }
+ atomic_set(&ipc->chub_status, CHUB_ST_NO_RESPONSE);
+ request_debug_work(ipc, CHUB_ERR_CHUB_NO_RESPONSE, 0);
ret = -EINVAL;
}
break;
+ case MAILBOX_EVT_ENABLE_IRQ:
+ /* if enable, mask from CHUB IRQ, else, unmask from CHUB IRQ */
+ ipc_hw_unmask_irq(AP, IRQ_EVT_C2A_INT);
+ ipc_hw_unmask_irq(AP, IRQ_EVT_C2A_INTCLR);
+ break;
+ case MAILBOX_EVT_DISABLE_IRQ:
+ ipc_hw_mask_irq(AP, IRQ_EVT_C2A_INT);
+ ipc_hw_mask_irq(AP, IRQ_EVT_C2A_INTCLR);
+ break;
+ case MAILBOX_EVT_ERASE_SHARED:
+ memset(ipc_get_base(IPC_REG_SHARED), 0, ipc_get_offset(IPC_REG_SHARED));
+ break;
default:
+ need_ipc = 1;
break;
}
- if ((int)event < IPC_DEBUG_UTC_MAX) {
- ipc->utc_run = event;
- if ((int)event == IPC_DEBUG_UTC_TIME_SYNC) {
- check_rtc_time();
-#ifdef CONFIG_CONTEXTHUB_DEBUG
- /* log_flush enable when utc_run is set */
- schedule_work(&ipc->utc_work);
-#else
+ if (!need_ipc)
+ return ret;
+
+ if (!contexthub_get_token(ipc, IPC_ACCESS)) {
+ dev_warn(ipc->dev, "%s event:%d/%d fails chub isn't active\n",
+ __func__, event, MAILBOX_EVT_MAX);
+ return -EINVAL;
+ }
+
+ /* handle ipc */
+ switch (event) {
+ case MAILBOX_EVT_DUMP_STATUS:
+ /* dump nanohub kernel status */
+ dev_info(ipc->dev, "Request to dump chub fw status\n");
+ ipc_write_debug_event(AP, (u32)MAILBOX_EVT_DUMP_STATUS);
+ ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_DEBUG);
+ break;
+ case MAILBOX_EVT_WAKEUP_CLR:
+ if (atomic_read(&ipc->wakeup_chub) == CHUB_ON) {
+ atomic_set(&ipc->wakeup_chub, CHUB_OFF);
+ ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_WAKEUP_CLR);
+ }
+ break;
+ case MAILBOX_EVT_WAKEUP:
+ if (atomic_read(&ipc->wakeup_chub) == CHUB_OFF) {
+ atomic_set(&ipc->wakeup_chub, CHUB_ON);
+ ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_WAKEUP);
+ }
+ break;
+ default:
+ /* handle ipc utc */
+ if ((int)event < IPC_DEBUG_UTC_MAX) {
+ ipc->utc_run = event;
+ if ((int)event == IPC_DEBUG_UTC_TIME_SYNC)
+ check_rtc_time();
ipc_write_debug_event(AP, (u32)event);
ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_DEBUG);
-#endif
+ return 0;
}
- ipc_write_debug_event(AP, (u32)event);
- ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_DEBUG);
+ break;
}
+
return ret;
}
struct device *dev = ipc->dev;
if (!atomic_read(&ipc->chub_status)) {
- ret = contexthub_download_image(ipc, 1);
+ ret = contexthub_download_image(ipc, IPC_REG_BL);
if (ret) {
dev_warn(dev, "fails to download bootloader\n");
return ret;
return ret;
}
- ret = contexthub_download_image(ipc, 0);
+ ret = contexthub_download_image(ipc, IPC_REG_OS);
if (ret) {
dev_warn(dev, "fails to download kernel\n");
return ret;
ret = -EINVAL;
}
- if (ret)
- dev_warn(dev, "fails to %s with %d. Status is %d\n",
- __func__, ret, atomic_read(&ipc->chub_status));
return ret;
}
-int contexthub_reset(struct contexthub_ipc_info *ipc)
+/*
+ * Re-download the firmware image for @reg and verify the SRAM contents
+ * still match the previous binary, dumping the first mismatching words.
+ * Returns 0 when the image was written and verified, negative errno on
+ * download failure or verification mismatch.
+ */
+static int contexthub_download_and_check_image(struct contexthub_ipc_info *ipc, enum ipc_region reg)
+{
+	u32 *fw = vmalloc(ipc_get_offset(reg));
+	int ret = 0;
+
+	/* no memory for the verify copy: fall back to a plain download */
+	if (!fw)
+		return contexthub_download_image(ipc, reg);
+
+	memcpy_fromio(fw, ipc_get_base(reg), ipc_get_offset(reg));
+	ret = contexthub_download_image(ipc, reg);
+	if (ret) {
+		dev_err(ipc->dev, "%s: download bl(%d) fails\n", __func__, reg == IPC_REG_BL);
+		goto out;
+	}
+
+	ret = memcmp(fw, ipc_get_base(reg), ipc_get_offset(reg));
+	if (ret) {
+		int i;
+		u32 *fw_image = (u32 *)ipc_get_base(reg);
+
+		dev_err(ipc->dev, "%s: fw(%lx) doesn't match with size %d\n",
+			__func__, (unsigned long)ipc_get_base(reg), ipc_get_offset(reg));
+		for (i = 0; i < ipc_get_offset(reg) / 4; i++)
+			if (fw[i] != fw_image[i]) {
+				/* clamp the dump so it never reads past the image end */
+				int remain = min_t(int, 64, ipc_get_offset(reg) - i * 4);
+
+				dev_err(ipc->dev, "fw[%d] %x -> wrong %x\n", i, fw_image[i], fw[i]);
+				print_hex_dump(KERN_CONT, "before:", DUMP_PREFIX_OFFSET, 16, 1, &fw[i], remain, false);
+				print_hex_dump(KERN_CONT, "after:", DUMP_PREFIX_OFFSET, 16, 1, &fw_image[i], remain, false);
+				ret = -EINVAL;
+				break;
+			}
+	}
+out:
+	dev_info(ipc->dev, "%s: download and checked bl(%d) ret:%d\n", __func__, reg == IPC_REG_BL, ret);
+	vfree(fw);
+	return ret;
+}
+
+static DEFINE_MUTEX(reset_mutex);
+/*
+ * Reset CHUB: shut it down, (re)load the BL/OS images when needed, then
+ * boot it again. @force_load reloads the OS image even while running.
+ * Returns 0 on success or a negative errno.
+ */
+int contexthub_reset(struct contexthub_ipc_info *ipc, bool force_load)
+{
+	int ret;
+
+	mutex_lock(&reset_mutex);
+	dev_info(ipc->dev, "%s: status:%d\n", __func__, atomic_read(&ipc->chub_status));
+	if (!force_load && (atomic_read(&ipc->chub_status) == CHUB_ST_RUN)) {
+		mutex_unlock(&reset_mutex);
+		return 0;
+	}
	atomic_inc(&ipc->in_reset);
+	if (!ipc->block_reset) {
+		/* core reset */
+		ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_SHUTDOWN);
+		msleep(100); /* wait for shut down time */
+	}
+
+	atomic_inc(&ipc->in_pmu_shutdown);
	ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_SHUTDOWN);
+	/* balance in_pmu_shutdown on BOTH paths: jumping to "out" with it
+	 * still raised would deny HW_ACCESS tokens forever after one
+	 * failed reset */
+	atomic_dec(&ipc->in_pmu_shutdown);
	if (ret) {
-		dev_err(ipc->dev, "%s: shutdonw fails, ret:%d\n", __func__, ret);
-		return ret;
+		dev_err(ipc->dev, "%s: shutdown fails, ret:%d\n", __func__, ret);
+		goto out;
	}
-	if (ipc->block_reset) {
-		ret = contexthub_download_image(ipc, 1);
+
+	if (ipc->block_reset || force_load) {
+		ret = contexthub_download_image(ipc, IPC_REG_BL);
		if (!ret) {
-			ret = contexthub_download_image(ipc, 0);
+			if (force_load) /* can use new binary */
+				ret = contexthub_download_image(ipc, IPC_REG_OS);
+			else /* use previous binary */
+				ret = contexthub_download_and_check_image(ipc, IPC_REG_OS);
+
			if (ret) {
				dev_err(ipc->dev, "%s: download os fails\n", __func__);
-				return ret;
+				goto out;
			}
		} else {
			dev_err(ipc->dev, "%s: download bl fails\n", __func__);
-			return ret;
+			goto out;
		}
	}
+
	ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_RESET);
	if (ret)
		dev_err(ipc->dev, "%s: reset fails, ret:%d\n", __func__, ret);
	else
		dev_info(ipc->dev, "%s: chub reseted! (cnt:%d)\n",
-			 __func__, atomic_read(&ipc->in_reset));
+			 __func__, ipc->err_cnt[CHUB_ERR_RESET_CNT]);
	if (!ret)
		ipc->err_cnt[CHUB_ERR_RESET_CNT]++;
-	if (!ret)
-		ret = ipc_check_reset_valid();
-
+out:
	atomic_dec(&ipc->in_reset);
+	mutex_unlock(&reset_mutex);
	return ret;
}
-int contexthub_download_image(struct contexthub_ipc_info *ipc, int bl)
+/* load the BL or OS firmware for @reg into CHUB SRAM; 0 or negative errno */
+int contexthub_download_image(struct contexthub_ipc_info *ipc, enum ipc_region reg)
{
	const struct firmware *entry;
	int ret;
-	enum ipc_region reg;
	char *name;

-	if (bl) {
+	if (reg == IPC_REG_BL)
		ret = request_firmware(&entry, "bl.unchecked.bin", ipc->dev);
-		reg = IPC_REG_BL;
-	} else {
+	else if (reg == IPC_REG_OS)
		ret = request_firmware(&entry, ipc->os_name, ipc->dev);
-		reg = IPC_REG_OS;
-	}
+	else
+		ret = -EINVAL;
	if (ret) {
-		dev_err(ipc->dev, "%s, bl(%d) request_firmware failed\n",
-			bl, __func__);
+		/* args must match the "%s, bl(%d)" format: __func__ first */
+		dev_err(ipc->dev, "%s, bl(%d) request_firmware failed\n",
+			__func__, reg == IPC_REG_BL);
		return ret;
	}
	memcpy(ipc_get_base(reg), entry->data, entry->size);
-	dev_info(ipc->dev, "%s: bl:%d, bin(size:0x%x) on %lx\n",
-		 __func__, bl, (int)entry->size,
-		 (unsigned long)ipc_get_base(reg));
+	dev_info(ipc->dev, "%s: bl:%d, bin(size:%d) on %lx\n",
+		 __func__, reg == IPC_REG_BL, (int)entry->size, (unsigned long)ipc_get_base(reg));
	release_firmware(entry);
	return 0;
#endif
switch (evt) {
case IRQ_EVT_C2A_DEBUG:
- enable_debug_workqueue(ipc, CHUB_ERR_NANOHUB);
+ request_debug_work(ipc, CHUB_ERR_NANOHUB, 1);
break;
case IRQ_EVT_C2A_INT:
if (atomic_read(&ipc->irq1_apInt) == C2A_OFF) {
if (evt < IRQ_EVT_CH_MAX) {
int lock;
-#ifdef USE_IPC_BUF
ipc->read_lock.flag++;
-#else
- content = ipc_get_addr(IPC_REG_IPC_C2A, evt);
- ipc_update_channel_status(content, CS_AP_RECV);
-
- if (!ipc->read_lock.flag)
- ipc->recv_order.order = 1; /* reset order */
-
- if (ipc->recv_order.container[evt])
- dev_warn(ipc->dev,
- "%s: invalid order container[%d] = %lu, status:%x\n",
- __func__, evt,
- ipc->recv_order.container[evt],
- content->status);
-
- ipc->recv_order.container[evt] =
- ++ipc->recv_order.order;
- ipc->read_lock.flag |= (1 << evt);
-
- DEBUG_PRINT(KERN_DEBUG, "<-R%d(%d)(%d)\n", evt,
- content->size, ipc->recv_order.order);
-#endif
/* TODO: requered.. ? */
spin_lock(&ipc->read_lock.event.lock);
lock = read_is_locked(ipc);
ipc_hw_read_int_gen_reg(AP));
ipc->err_cnt[err]++;
ipc_hw_clear_all_int_pend_reg(AP);
- enable_debug_workqueue(ipc, err);
+ if (ipc->err_cnt[err] > CHUB_RESET_THOLD)
+ request_debug_work(ipc, err, 1);
}
return IRQ_HANDLED;
}
-#define CONFIG_WDT_ENABLE
-#if defined(CHUB_RESET_ENABLE) && defined(CONFIG_WDT_ENABLE)
+#if defined(CHUB_RESET_ENABLE)
+/* CHUB fw watchdog bark: quiesce the irq and schedule recovery work */
static irqreturn_t contexthub_irq_wdt_handler(int irq, void *data)
{
	struct contexthub_ipc_info *ipc = data;
-	dev_info(ipc->dev, "%s calledn", __func__);
+	/* the original string "calledn" was a broken "called\n" */
+	dev_info(ipc->dev, "%s called\n", __func__);
	disable_irq_nosync(ipc->irq_wdt);
-	enable_debug_workqueue(ipc, CHUB_ERR_NANOHUB_WDT);
+	request_debug_work(ipc, CHUB_ERR_FW_WDT, 1);
	return IRQ_HANDLED;
}
return ret;
}
-#if defined(CHUB_RESET_ENABLE) && defined(CONFIG_WDT_ENABLE)
+#if defined(CHUB_RESET_ENABLE)
/* get wdt interrupt optionally */
chub->irq_wdt = irq_of_parse_and_map(node, 1);
if (chub->irq_wdt > 0) {
struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
int ret = contexthub_poweron(ipc);
- if (!ret)
- ret = ipc_check_reset_valid();
-
return ret < 0 ? ret : count;
}
struct device_attribute *attr,
const char *buf, size_t count)
{
- int ret = 0;
struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
-
- if (!ipc->block_reset)
- ret = contexthub_download_image(ipc, 0);
-
- if (!ret)
- ret = contexthub_reset(ipc);
+ int ret = contexthub_reset(ipc, 1);
return ret < 0 ? ret : count;
}
(!strncmp("PDMA_SHUB", itmon_data->master, sizeof("PDMA_SHUB") - 1)))) {
dev_info(data->dev, "%s: chub(%s) itmon detected: action:%d!!\n",
__func__, itmon_data->master, action);
- enable_debug_workqueue(data, CHUB_ERR_ITMON);
+ request_debug_work(data, CHUB_ERR_ITMON, 1);
return NOTIFY_OK;
}
atomic_set(&chub->chub_status, CHUB_ST_NO_POWER);
atomic_set(&chub->in_reset, 0);
+ atomic_set(&chub->in_pmu_shutdown, 0);
chub->powermode = 0; /* updated by fw bl */
- chub->active_err = 0;
+ chub->cur_err = 0;
+ for (i = 0; i < CHUB_ERR_MAX; i++)
+ chub->err_cnt[i] = 0;
chub->dev = &pdev->dev;
platform_set_drvdata(pdev, chub);
contexthub_config_init(chub);
init_waitqueue_head(&chub->read_lock.event);
init_waitqueue_head(&chub->chub_alive_lock.event);
INIT_WORK(&chub->debug_work, handle_debug_work_func);
-#ifdef CONFIG_CONTEXTHUB_DEBUG
- INIT_WORK(&chub->utc_work, handle_utc_work_func);
-#endif
#ifdef CONFIG_EXYNOS_ITMON
chub->itmon_nb.notifier_call = chub_itmon_notifier;
itmon_notifier_chain_register(&chub->itmon_nb);
__ret; \
})
+#define CHUB_RESET_ENABLE
+
enum mailbox_event {
MAILBOX_EVT_UTC_MAX = IPC_DEBUG_UTC_MAX,
MAILBOX_EVT_DUMP_STATUS = IPC_DEBUG_DUMP_STATUS,
- MAILBOX_EVT_DUMP_CHUB,
- MAILBOX_EVT_POWER_ON,
- MAILBOX_EVT_DEBUG_MAX,
MAILBOX_EVT_WAKEUP,
MAILBOX_EVT_WAKEUP_CLR,
MAILBOX_EVT_ERASE_SHARED,
MAILBOX_EVT_ENABLE_IRQ,
MAILBOX_EVT_DISABLE_IRQ,
+ MAILBOX_EVT_RESET_EVT_START,
MAILBOX_EVT_INIT_IPC,
+ MAILBOX_EVT_POWER_ON,
MAILBOX_EVT_CHUB_ALIVE,
MAILBOX_EVT_SHUTDOWN,
MAILBOX_EVT_RESET,
CHUB_ST_RUN,
CHUB_ST_SHUTDOWN,
CHUB_ST_NO_RESPONSE,
+ CHUB_ST_ERR,
CHUB_ST_HANG,
};
CHUB_ERR_READ_FAIL,
CHUB_ERR_WRITE_FAIL,
CHUB_ERR_EVTQ_NO_HW_TRIGGER,
- CHUB_ERR_CHUB_NO_RESPONSE,
+ CHUB_ERR_CHUB_NO_RESPONSE, /* 5 */
CHUB_ERR_ITMON,
- CHUB_ERR_NANOHUB,
- CHUB_ERR_RESET_CNT,
- CHUB_ERR_CHUB_MAX,
- CHUB_ERR_NANOHUB_FAULT, /* chub error */
- CHUB_ERR_NANOHUB_ASSERT,
- CHUB_ERR_NANOHUB_ERROR,
- CHUB_ERR_NANOHUB_WDT, /* 13 */
- CHUB_ERR_COMMS_NACK,
+ CHUB_ERR_FW_FAULT, /* chub error */
+ CHUB_ERR_FW_WDT, /* 8 */
+ CHUB_ERR_NEED_RESET,
+ CHUB_ERR_FW_ERROR = CHUB_ERR_NEED_RESET,
+ CHUB_ERR_COMMS_NACK, /* ap comms error */
CHUB_ERR_COMMS_BUSY,
CHUB_ERR_COMMS_UNKNOWN,
CHUB_ERR_COMMS,
- CHUB_ERR_COMMS_MAX,
+ CHUB_ERR_RESET_CNT,
+ CHUB_ERR_NANOHUB, /* nanohub dbg error */
CHUB_ERR_MAX,
};
struct read_wait read_lock;
#ifdef USE_IPC_BUF
u8 rxbuf[PACKET_SIZE_MAX];
-#else
- struct recv_ctrl recv_order;
#endif
struct chub_alive chub_alive_lock;
void __iomem *sram;
unsigned long clkrate;
atomic_t chub_status;
atomic_t in_reset;
+ atomic_t in_pmu_shutdown;
atomic_t irq1_apInt;
atomic_t wakeup_chub;
int irq_mailbox;
int irq_wdt;
int err_cnt[CHUB_ERR_MAX];
- u32 active_err;
+ u32 cur_err;
int utc_run;
int powermode;
int block_reset;
#define IPC_HW_WRITE_BAAW_CHUB3(base, val) \
__raw_writel((val), (base) + REG_BAAW_D_CHUB3)
+enum access_type { HW_ACCESS, IPC_ACCESS };
+
+int contexthub_get_token(struct contexthub_ipc_info *ipc, enum access_type acc);
int contexthub_ipc_write_event(struct contexthub_ipc_info *data,
enum mailbox_event event);
int contexthub_ipc_read(struct contexthub_ipc_info *ipc,
int contexthub_ipc_write(struct contexthub_ipc_info *ipc,
uint8_t *tx, int length, int timeout);
int contexthub_poweron(struct contexthub_ipc_info *data);
-int contexthub_download_image(struct contexthub_ipc_info *data, int bl);
-int contexthub_download_kernel(struct contexthub_ipc_info *dev);
-int contexthub_download_bl(struct contexthub_ipc_info *data);
-int contexthub_reset(struct contexthub_ipc_info *data);
+int contexthub_download_image(struct contexthub_ipc_info *data, enum ipc_region reg);
+int contexthub_reset(struct contexthub_ipc_info *ipc, bool force_load);
int contexthub_wakeup(struct contexthub_ipc_info *data, int evt);
int contexthub_is_run(struct contexthub_ipc_info *ipc);
-int contexthub_request(struct contexthub_ipc_info *ipc);
+int contexthub_request(struct contexthub_ipc_info *ipc, enum access_type acc);
void contexthub_release(struct contexthub_ipc_info *ipc);
#endif
int i;
struct dbg_dump *p_dump = p_dbg_dump;
- if (contexthub_request(ipc)) {
+ if (contexthub_request(ipc, HW_ACCESS)) {
pr_err("%s: fails to contexthub_request\n", __func__);
return;
}
void chub_dbg_dump_ram(struct contexthub_ipc_info *ipc, enum chub_err_type reason)
{
if (p_dbg_dump) {
- if (contexthub_request(ipc)) {
+ if (contexthub_request(ipc, HW_ACCESS)) {
pr_err("%s: fails to contexthub_request\n", __func__);
return;
}
}
}
+static void chub_dbg_dump_status(struct contexthub_ipc_info *ipc)
+{
+ int val;
+ int i;
+ char *dbg_name[CHUB_ERR_MAX] = {"none", "evtq_empty",
+ "read_fail", "write_fail", "evtq_no_hw_trigger",
+ "chub_no_resp", "itmon", "fw_fault", "fw_wdt",
+ "fw_err", "comms_nack", "comms_busy",
+ "comms_unknown", "comms", "reset_cnt", "fw_dbg"};
+
+#ifdef CONFIG_CHRE_SENSORHUB_HAL
+ struct nanohub_data *data = ipc->data;
+
+ dev_info(ipc->dev,
+ "%s: nanohub driver status\nwu:%d wu_l:%d acq:%d irq1_apInt:%d fired:%d\n",
+ __func__,
+ atomic_read(&data->wakeup_cnt),
+ atomic_read(&data->wakeup_lock_cnt),
+ atomic_read(&data->wakeup_acquired),
+ atomic_read(&ipc->irq1_apInt), nanohub_irq1_fired(data));
+
+#endif
+
+ /* print error status */
+ for (i = 0; i < CHUB_ERR_MAX; i++) {
+ if (ipc->err_cnt[i])
+ dev_info(ipc->dev, "%s: err(%d:%s) %d times\n",
+ __func__, i, dbg_name[i], ipc->err_cnt[i]);
+ }
+
+ if (!contexthub_request(ipc, IPC_ACCESS)) {
+ pr_err("%s: fails to request contexthub. \n", __func__);
+ return;
+ }
+
+ /* dump nanohub kernel status */
+ contexthub_ipc_write_event(ipc, MAILBOX_EVT_DUMP_STATUS);
+ log_flush(ipc->fw_log);
+ contexthub_release(ipc);
+}
+
void chub_dbg_dump_hw(struct contexthub_ipc_info *ipc, enum chub_err_type reason)
{
dev_info(ipc->dev, "%s: reason:%d\n", __func__, reason);
+
chub_dbg_dump_gpr(ipc);
chub_dbg_dump_ram(ipc, reason);
if (p_dbg_dump) {
#ifdef CONFIG_CONTEXTHUB_DEBUG
- if (contexthub_request(ipc)) {
- pr_err("%s: fails to contexthub_request\n", __func__);
- return;
- }
-
/* write file */
dev_info(ipc->dev,
"%s: write file: sram:%p, dram:%p(off:%d), size:%d\n",
chub_dbg_write_file(ipc->dev, "sram",
&p_dbg_dump->sram[p_dbg_dump->sram_start],
ipc_get_chub_mem_size());
-
- contexthub_release(ipc);
#endif
}
+
+ /* dump log and status with ipc */
+ chub_dbg_dump_status(ipc);
}
-void chub_dbg_check_and_download_image(struct contexthub_ipc_info *ipc)
+/* re-download BL and OS images and verify them against the dumped copies;
+ * returns 0 when both match, -EINVAL on any mismatch */
+int chub_dbg_check_and_download_image(struct contexthub_ipc_info *ipc)
{
u32 *bl = vmalloc(ipc_get_offset(IPC_REG_BL));
+/* NOTE(review): vmalloc() result is not NULL-checked before the
+ * memcpy_fromio() below — confirm and consider the fallback used by
+ * contexthub_download_and_check_image() */
- int ret;
+ int ret = 0;
memcpy_fromio(bl, ipc_get_base(IPC_REG_BL), ipc_get_offset(IPC_REG_BL));
- contexthub_download_image(ipc, 1);
+ contexthub_download_image(ipc, IPC_REG_BL);
ret = memcmp(bl, ipc_get_base(IPC_REG_BL), ipc_get_offset(IPC_REG_BL));
if (ret) {
if (bl[i] != bl_image[i]) {
pr_info("bl[%d] %x -> wrong %x\n", i,
bl_image[i], bl[i]);
- break;
+ ret = -EINVAL;
+ goto out;
}
}
- contexthub_download_image(ipc, 0);
+ contexthub_download_image(ipc, IPC_REG_OS);
/* os image is dumped on &p_dbg_dump->sram[p_dbg_dump->sram_start] */
ret = memcmp(&p_dbg_dump->sram[p_dbg_dump->sram_start],
ipc_get_base(IPC_REG_OS), ipc_get_offset(IPC_REG_OS));
- if (ret)
+ if (ret) {
pr_info("os doens't match with size %d\n",
ipc_get_offset(IPC_REG_OS));
-
- vfree(bl);
-}
-
-void chub_dbg_dump_status(struct contexthub_ipc_info *ipc)
-{
- int val;
- char *dbg_name[CHUB_ERR_MAX] = {"none", "evtq_empty",
- "read_fail", "write_fail", "evtq_no_hw_trigger",
- "chub_no_resp", "itmon", "nanohub_dbg", "reset_cnt",
- "chub_err_max", "fw_fault", "fw_assert", "fw_error",
- "fw_wdt", "comms_nack", "comms_busy",
- "comms_unknown", "comms", "comms_max"};
-
-#ifdef CONFIG_CHRE_SENSORHUB_HAL
- struct nanohub_data *data = ipc->data;
-
- CSP_PRINTF_INFO
- ("CHUB DUMP: nanohub driver status\nwu:%d wu_l:%d acq:%d irq1_apInt:%d fired:%d\n",
- atomic_read(&data->wakeup_cnt),
- atomic_read(&data->wakeup_lock_cnt),
- atomic_read(&data->wakeup_acquired),
- atomic_read(&ipc->irq1_apInt), nanohub_irq1_fired(data));
-
- if (!contexthub_is_run(ipc)) {
- pr_warn("%s: chub isn't run\n", __func__);
- return;
+ ret = -EINVAL;
}
-#endif
-#ifndef USE_IPC_BUF
- CSP_PRINTF_INFO
- ("CHUB DUMP: contexthub driver status\nflag:%x cnt:%d, order:%lu\nalive container:\n",
- ipc->read_lock.flag, atomic_read(&ipc->read_lock.cnt),
- ipc->recv_order.order);
- for (val = 0; val < IRQ_EVT_CH_MAX; val++)
- if (ipc->recv_order.container[val])
- CSP_PRINTF_INFO("container[%d]:%lu\n", val,
- ipc->recv_order.container[val]);
-#endif
- for (val = 0; val < CHUB_ERR_MAX; val++)
- if (ipc->err_cnt[val])
- CSP_PRINTF_INFO("error %d(%s) occurs %d times\n",
- val, dbg_name[val], ipc->err_cnt[val]);
- ipc_dump();
- /* dump nanohub kernel status */
- CSP_PRINTF_INFO("CHUB DUMP: Request to dump nanohub kernel status\n");
- ipc_write_debug_event(AP, (u32)MAILBOX_EVT_DUMP_STATUS);
- ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_DEBUG);
- log_flush(ipc->fw_log);
+out:
+ vfree(bl);
+ return ret;
}
static ssize_t chub_bin_sram_read(struct file *file, struct kobject *kobj,
dev_dbg(dev, "%s(%lld, %zu)\n", __func__, off, size);
- if (!contexthub_is_run(dev_get_drvdata(dev))) {
+ if (!contexthub_get_token(dev_get_drvdata(dev), HW_ACCESS)) {
pr_warn("%s: chub isn't run\n", __func__);
return -EINVAL;
}
[IPC_DEBUG_UTC_CHECK_CPU_UTIL] = "utilization",
[IPC_DEBUG_UTC_HEAP_DEBUG] = "heap",
[IPC_DEBUG_UTC_HANG] = "hang",
+ [IPC_DEBUG_UTC_HANG_ITMON] = "itmon",
};
static ssize_t chub_alive_show(struct device *dev,
dev_info(ipc->dev, "%s: event:%d\n", __func__, event);
if (!err) {
- err = contexthub_request(ipc);
+ err = contexthub_request(ipc, IPC_ACCESS);
if (err)
pr_err("%s: fails to request contexthub. ret:%d\n", __func__, err);
memset(input, 0, PACKET_SIZE_MAX);
memcpy(input, buf, count);
} else {
- pr_err("%s: ipc size(%d) is bigger than max(%d)\n",
+ dev_err(ipc->dev, "%s: ipc size(%d) is bigger than max(%d)\n",
__func__, (int)count, (int)PACKET_SIZE_MAX);
return -EINVAL;
}
- ret = contexthub_request(ipc);
+ ret = contexthub_request(ipc, IPC_ACCESS);
if (ret) {
- pr_err("%s: fails to request contexthub. ret:%d\n", __func__, ret);
+ dev_err(ipc->dev, "%s: fails to request contexthub. ret:%d\n", __func__, ret);
return ret;
}
ret = contexthub_ipc_write_event(ipc, (u32)IPC_DEBUG_UTC_IPC_TEST_START);
if (ret) {
- pr_err("%s: fails to set start test event. ret:%d\n", __func__, ret);
+ dev_err(ipc->dev, "%s: fails to set start test event. ret:%d\n", __func__, ret);
count = ret;
goto out;
}
ret = contexthub_ipc_write(ipc, input, count, IPC_MAX_TIMEOUT);
if (ret != count) {
- pr_info("%s: fail to write\n", __func__);
- return -EINVAL;
+ dev_info(ipc->dev, "%s: fail to write\n", __func__);
+ goto out;
}
ret = contexthub_ipc_read(ipc, output, 0, IPC_MAX_TIMEOUT);
if (count != ret) {
- pr_info("%s: fail to read ret:%d\n", __func__, ret);
- return -EINVAL;
+ dev_info(ipc->dev, "%s: fail to read ret:%d\n", __func__, ret);
}
if (strncmp(input, output, count)) {
- pr_info("%s: fail to compare input/output\n", __func__);
+ dev_info(ipc->dev, "%s: fail to compare input/output\n", __func__);
print_hex_dump(KERN_CONT, "chub input:",
DUMP_PREFIX_OFFSET, 16, 1, input,
count, false);
print_hex_dump(KERN_CONT, "chub output:",
DUMP_PREFIX_OFFSET, 16, 1, output,
count, false);
- return 0;
- }
+ } else
+ dev_info(ipc->dev, "[%s pass] len:%d, str: %s\n", __func__, (int)count, output);
+
+out:
ret = contexthub_ipc_write_event(ipc, (u32)IPC_DEBUG_UTC_IPC_TEST_END);
if (ret) {
- pr_err("%s: fails to set end test event. ret:%d\n", __func__, ret);
+ dev_err(ipc->dev, "%s: fails to set end test event. ret:%d\n", __func__, ret);
count = ret;
- } else
- pr_info("[%s pass] len:%d, str: %s\n", __func__, (int)count, output);
+ }
-out:
contexthub_release(ipc);
-
return count;
}
const char *buf, size_t count)
{
struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
- int ret = contexthub_request(ipc);
-
- if (ret) {
- pr_err("%s: fails to contexthub_request\n", __func__);
- return 0;
- }
chub_dbg_dump_status(ipc);
- contexthub_ipc_write_event(ipc, MAILBOX_EVT_DUMP_STATUS);
-
- contexthub_release(ipc);
return count;
}
return ret;
if (event)
- ret = contexthub_request(ipc);
+ ret = contexthub_request(ipc, IPC_ACCESS);
else
contexthub_release(ipc);
void chub_dbg_dump_ram(struct contexthub_ipc_info *ipc, enum chub_err_type reason);
void chub_dbg_dump_gpr(struct contexthub_ipc_info *ipc);
void chub_dbg_print_hw(struct contexthub_ipc_info *ipc);
-void chub_dbg_dump_status(struct contexthub_ipc_info *ipc);
-void chub_dbg_check_and_download_image(struct contexthub_ipc_info *ipc);
+int chub_dbg_check_and_download_image(struct contexthub_ipc_info *ipc);
#endif /* __CHUB_DEBUG_H */
struct ipc_map_area *ipc_map;
+#define NAME_PREFIX "nanohub-ipc"
+
#ifdef PACKET_LOW_DEBUG
#define GET_IPC_REG_STRING(a) (((a) == IPC_REG_IPC_C2A) ? "wt" : "rd")
struct chub_bootargs *map = (struct chub_bootargs *)(sram_base + MAP_INFO_OFFSET);
if (strncmp(OS_UPDT_MAGIC, map->magic, sizeof(OS_UPDT_MAGIC))) {
- CSP_PRINTF_ERROR("%s: %p has wrong magic key: %s -> %s\n",
- __func__, map, OS_UPDT_MAGIC, map->magic);
+ CSP_PRINTF_ERROR("%s: %s: %p has wrong magic key: %s -> %s\n",
+ NAME_PREFIX, __func__, map, OS_UPDT_MAGIC, map->magic);
return 0;
}
if (map->ipc_version != IPC_VERSION) {
CSP_PRINTF_ERROR
- ("%s: ipc_version doesn't match: AP %d, Chub: %d\n",
- __func__, IPC_VERSION, map->ipc_version);
+ ("%s: %s: ipc_version doesn't match: AP %d, Chub: %d\n",
+ NAME_PREFIX, __func__, IPC_VERSION, map->ipc_version);
+ return 0;
+ }
+
+ if (sizeof(struct chub_bootargs) > MAP_INFO_MAX_SIZE) {
+ CSP_PRINTF_ERROR
+ ("%s: %s: map size bigger than max %d > %d", NAME_PREFIX, __func__,
+ sizeof(struct chub_bootargs), MAP_INFO_MAX_SIZE);
return 0;
}
ipc_map = ipc_addr[IPC_REG_IPC].base;
ipc_map->logbuf.size =
- ipc_addr[IPC_REG_IPC].offset - sizeof(struct ipc_map_area);
+ ipc_addr[IPC_REG_IPC].offset - sizeof(struct ipc_map_area) - CHUB_PERSISTBUF_SIZE;
ipc_addr[IPC_REG_IPC_EVT_A2C].base = &ipc_map->evt[IPC_EVT_A2C].data;
- ipc_addr[IPC_REG_IPC_EVT_A2C].offset = 0;
+ ipc_addr[IPC_REG_IPC_EVT_A2C].offset = sizeof(struct ipc_evt);
ipc_addr[IPC_REG_IPC_EVT_A2C_CTRL].base =
&ipc_map->evt[IPC_EVT_A2C].ctrl;
ipc_addr[IPC_REG_IPC_EVT_A2C_CTRL].offset = 0;
ipc_addr[IPC_REG_IPC_EVT_C2A].base = &ipc_map->evt[IPC_EVT_C2A].data;
- ipc_addr[IPC_REG_IPC_EVT_C2A].offset = 0;
+ ipc_addr[IPC_REG_IPC_EVT_C2A].offset = sizeof(struct ipc_evt);
ipc_addr[IPC_REG_IPC_EVT_C2A_CTRL].base =
&ipc_map->evt[IPC_EVT_C2A].ctrl;
ipc_addr[IPC_REG_IPC_EVT_C2A_CTRL].offset = 0;
ipc_addr[IPC_REG_IPC_C2A].base = &ipc_map->data[IPC_DATA_C2A];
ipc_addr[IPC_REG_IPC_A2C].base = &ipc_map->data[IPC_DATA_A2C];
-#ifdef USE_IPC_BUF
ipc_addr[IPC_REG_IPC_C2A].offset = sizeof(struct ipc_buf);
ipc_addr[IPC_REG_IPC_A2C].offset = sizeof(struct ipc_buf);
-#else
- ipc_addr[IPC_REG_IPC_C2A].offset = sizeof(struct ipc_content);
- ipc_addr[IPC_REG_IPC_A2C].offset = sizeof(struct ipc_content);
-#endif
ipc_addr[IPC_REG_LOG].base = &ipc_map->logbuf.buf;
- ipc_addr[IPC_REG_LOG].offset =
- ipc_addr[IPC_REG_IPC].offset - sizeof(struct ipc_map_area);
+ ipc_addr[IPC_REG_LOG].offset = ipc_map->logbuf.size;
+ ipc_addr[IPC_REG_PERSISTBUF].base = ipc_addr[IPC_REG_LOG].base + ipc_addr[IPC_REG_LOG].offset;
+ ipc_addr[IPC_REG_PERSISTBUF].offset = CHUB_PERSISTBUF_SIZE;
-#ifdef CHUB_IPC
- ipc_map->logbuf.token = 0;
- memset(ipc_addr[IPC_REG_LOG].base, 0, ipc_addr[IPC_REG_LOG].offset);
-#endif
+ if (((u32)ipc_addr[IPC_REG_PERSISTBUF].base + ipc_addr[IPC_REG_PERSISTBUF].offset) >
+ ((u32)ipc_addr[IPC_REG_IPC].base + ipc_addr[IPC_REG_IPC].offset))
+ CSP_PRINTF_INFO("%s: %s: wrong persistbuf addr:%p, %d, ipc_end:0x%x\n",
+ NAME_PREFIX, __func__,
+ ipc_addr[IPC_REG_PERSISTBUF].base, ipc_addr[IPC_REG_PERSISTBUF].offset, map->ipc_end);
CSP_PRINTF_INFO
- ("contexthub map information(v%u)\n bl(%p %d)\n os(%p %d)\n ipc(%p %d)\n ram(%p %d)\n shared(%p %d)\n dump(%p %d)\n",
- map->ipc_version,
+ ("%s: contexthub map information(v%u)\n bl(%p %d)\n os(%p %d)\n ipc(%p %d)\n ram(%p %d)\n shared(%p %d)\n dump(%p %d)\n",
+ NAME_PREFIX, map->ipc_version,
ipc_addr[IPC_REG_BL].base, ipc_addr[IPC_REG_BL].offset,
ipc_addr[IPC_REG_OS].base, ipc_addr[IPC_REG_OS].offset,
ipc_addr[IPC_REG_IPC].base, ipc_addr[IPC_REG_IPC].offset,
ipc_addr[IPC_REG_SHARED].base, ipc_addr[IPC_REG_SHARED].offset,
ipc_addr[IPC_REG_DUMP].base, ipc_addr[IPC_REG_DUMP].offset);
- CSP_PRINTF_INFO
- ("ipc_map information\n ipc:%x\n data_a2c:%x\n data_c2a:%x\n evt_a2c:%x\n evt_c2a:%x\n log:%x\n",
- (unsigned int)ipc_get_base(IPC_REG_IPC),
- (unsigned int)ipc_get_base(IPC_REG_IPC_A2C),
- (unsigned int)ipc_get_base(IPC_REG_IPC_C2A),
- (unsigned int)ipc_get_base(IPC_REG_IPC_EVT_A2C),
- (unsigned int)ipc_get_base(IPC_REG_IPC_EVT_C2A),
- (unsigned int)ipc_get_base(IPC_REG_LOG));
+ CSP_PRINTF_INFO
+ ("%s: ipc_map information\n ipc(%p %d)\n data_c2a(%p %d)\n data_a2c(%p %d)\n evt_c2a(%p %d)\n evt_a2c(%p %d)\n log(%p %d)\n persistbuf(%p %d)\n",
+ NAME_PREFIX, ipc_get_base(IPC_REG_IPC), ipc_get_offset(IPC_REG_IPC),
+ ipc_get_base(IPC_REG_IPC_C2A), ipc_get_offset(IPC_REG_IPC_C2A),
+ ipc_get_base(IPC_REG_IPC_A2C), ipc_get_offset(IPC_REG_IPC_A2C),
+ ipc_get_base(IPC_REG_IPC_EVT_C2A), ipc_get_offset(IPC_REG_IPC_EVT_C2A),
+ ipc_get_base(IPC_REG_IPC_EVT_A2C), ipc_get_offset(IPC_REG_IPC_EVT_A2C),
+ ipc_get_base(IPC_REG_LOG), ipc_get_offset(IPC_REG_LOG),
+ ipc_get_base(IPC_REG_PERSISTBUF), ipc_get_offset(IPC_REG_PERSISTBUF));
+
+#ifndef USE_IPC_BUF
+ CSP_PRINTF_INFO
+ ("%s: ipc_map data_ch: size:%d on %d channel\n", NAME_PREFIX, PACKET_SIZE_MAX, IPC_CH_BUF_NUM);
+#ifdef SEOS
+ if (PACKET_SIZE_MAX < NANOHUB_PACKET_SIZE_MAX)
+ CSP_PRINTF_ERROR("%s: %d should be bigger than %d\n", NAME_PREFIX, PACKET_SIZE_MAX, NANOHUB_PACKET_SIZE_MAX);
+#endif
+#endif
return ipc_map;
}
void ipc_dump(void)
{
- CSP_PRINTF_INFO("%s: a2x event\n", __func__);
+ CSP_PRINTF_INFO("%s: %s: a2x event\n", NAME_PREFIX, __func__);
ipc_print_evt(IPC_EVT_A2C);
- CSP_PRINTF_INFO("%s: c2a event\n", __func__);
+ CSP_PRINTF_INFO("%s: %s: c2a event\n", NAME_PREFIX, __func__);
ipc_print_evt(IPC_EVT_C2A);
+ CSP_PRINTF_INFO("%s: %s: data buffer\n", NAME_PREFIX, __func__);
+ ipc_print_databuf();
+}
#ifndef USE_IPC_BUF
- CSP_PRINTF_INFO("%s: active channel\n", __func__);
- ipc_print_channel();
+/* true when the ring holds no pending entries (enqueue caught up with dequeue) */
+static inline bool __ipc_queue_empty(struct ipc_buf *ipc_data)
+{
+	return ipc_data->eq == ipc_data->dq;
+}
+
+/* true when advancing eq would collide with dq; one slot stays free so that
+ * full and empty are distinguishable */
+static inline bool __ipc_queue_full(struct ipc_buf *ipc_data)
+{
+	u32 next_eq = (ipc_data->eq + 1) % IPC_CH_BUF_NUM;
+
+	return next_eq == ipc_data->dq;
+}
+
+/*
+ * Empty stub: the dir -> ipc_region mapping is currently done inline in
+ * ipc_write_data()/ipc_read_data() via the IPC_DATA_C2A ternary.
+ * NOTE(review): this helper has no body and no visible caller in this patch —
+ * presumably a leftover placeholder; confirm and either implement or drop it.
+ */
+static inline void data_dir_to_reg(enum ipc_data_list dir)
+{
+}
+
+/*
+ * Queue one packet for the given direction and raise the matching event.
+ * @dir: IPC_DATA_C2A or IPC_DATA_A2C (selects the shared-SRAM ring)
+ * @tx: payload to copy into the ring slot
+ * @length: payload size; must not exceed PACKET_SIZE_MAX
+ * Returns 0 on success, -EINVAL when the ring is full or the size is invalid,
+ * or the ipc_add_evt() error code.
+ */
+int ipc_write_data(enum ipc_data_list dir, void *tx, u16 length)
+{
+	int ret = 0;
+	enum ipc_region reg = (dir == IPC_DATA_C2A) ? IPC_REG_IPC_C2A : IPC_REG_IPC_A2C;
+	struct ipc_buf *ipc_data = ipc_get_base(reg);
+
+	if (length <= PACKET_SIZE_MAX) {
+		if (!__ipc_queue_full(ipc_data)) {
+			struct ipc_channel_buf *ipc;
+
+			ipc = &ipc_data->ch[ipc_data->eq];
+			ipc->size = length;
+#ifdef AP_IPC
+			memcpy_toio(ipc->buf, tx, length);
#else
- CSP_PRINTF_INFO("%s: data buffer\n", __func__);
- ipc_print_databuf();
+			memcpy(ipc->buf, tx, length);
#endif
+			/* publish the slot only after the payload is in SRAM */
+			ipc_data->eq = (ipc_data->eq + 1) % IPC_CH_BUF_NUM;
+		} else {
+			ret = -EINVAL;
+		}
+	} else {
+		/* use NAME_PREFIX and -EINVAL for consistency with the other error paths */
+		CSP_PRINTF_INFO("%s: %s: invalid size:%d\n",
+			NAME_PREFIX, __func__, length);
+		return -EINVAL;
+	}
+
+	if (!ret) {
+		enum ipc_evt_list evtq = (dir == IPC_DATA_C2A) ? IPC_EVT_C2A : IPC_EVT_A2C;
+
+		ret = ipc_add_evt(evtq, IRQ_EVT_CH0);
+	} else {
+		CSP_PRINTF_INFO("%s: %s: error\n", NAME_PREFIX, __func__);
+	}
+	return ret;
}
-#ifdef USE_IPC_BUF
+/*
+ * Dequeue the next packet for the given direction.
+ * @dir: IPC_DATA_C2A or IPC_DATA_A2C
+ * @len: out parameter; set to the packet size, or 0 when nothing is queued
+ * Returns a pointer to the payload inside shared SRAM (valid only until the
+ * slot is reused by the producer), or NULL when the ring is empty.
+ */
+void *ipc_read_data(enum ipc_data_list dir, u32 *len)
+{
+	enum ipc_region reg = (dir == IPC_DATA_C2A) ? IPC_REG_IPC_C2A : IPC_REG_IPC_A2C;
+	struct ipc_buf *ipc_data = ipc_get_base(reg);
+
+	/* always report a defined length so callers checking *len before the
+	 * NULL test don't read an uninitialized value */
+	*len = 0;
+	if (!__ipc_queue_empty(ipc_data)) {
+		struct ipc_channel_buf *ipc;
+
+		ipc = &ipc_data->ch[ipc_data->dq];
+		*len = ipc->size;
+		ipc_data->dq = (ipc_data->dq + 1) % IPC_CH_BUF_NUM;
+		return ipc->buf;
+	}
+
+	return NULL;
+}
+#else
static inline void ipc_copy_bytes(u8 *dst, u8 *src, int size)
{
int i;
u16 size_to_read;
u32 size_to_copy_top;
u32 size_to_copy_bottom;
- enum ipc_region reg;
-
- /* get ipc region */
- if (dir == IPC_DATA_C2A)
- reg = IPC_REG_IPC_C2A;
- else if (dir == IPC_DATA_A2C)
- reg = IPC_REG_IPC_A2C;
- else {
- CSP_PRINTF_ERROR("%s: invalid dir:%d\n", __func__, dir);
- return -1;
- }
+ enum ipc_region reg = (dir == IPC_DATA_C2A) ? IPC_REG_IPC_C2A : IPC_REG_IPC_A2C;
/* get ipc_data base */
ipc_data = ipc_get_base(reg);
/* check index due to sram corruption */
if ((eq > IPC_DATA_SIZE) || (dq > IPC_DATA_SIZE) ||
(ipc_data->full > 1) || (ipc_data->empty > 1)) {
- CSP_PRINTF_ERROR("%s: invalid index:%d, %d, %d, %d\n",
- __func__, eq, dq, ipc_data->full, ipc_data->empty);
+ CSP_PRINTF_ERROR("%s: %s: invalid index: eq:%d, dq:%d, full:%d, empty:%d\n",
+ NAME_PREFIX, __func__, eq, dq, ipc_data->full, ipc_data->empty);
return -1;
}
#ifdef USE_IPC_BUF_LOG
- CSP_PRINTF_INFO("%s: dir:%s(w:%d, r:%d, cnt:%d), e:%d d:%d, empty:%d, full:%d, ipc_data:%p, len:%d\n",
- __func__, dir ? "a2c" : "c2a", ipc_data->cnt_dbg_wt,
- ipc_data->cnt_dbg_rd, ipc_data->cnt, eq, dq, ipc_data->empty,
+ CSP_PRINTF_INFO("%s: %s: dir:%s, e:%d d:%d, empty:%d, full:%d, ipc_data:%p, len:%d\n",
+ NAME_PREFIX, __func__, dir ? "a2c" : "c2a", ipc_data->cnt, eq, dq, ipc_data->empty,
ipc_data->full, ipc_data, length);
#endif
else if (eq < dq)
useful = dq - eq;
else if (ipc_data->full) {
- CSP_PRINTF_ERROR("%s is full(eq:%d, dq:%d, f:%d, e:%d)\n",
- __func__, ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
+ CSP_PRINTF_ERROR("%s: %s is full(eq:%d, dq:%d, f:%d, e:%d)\n",
+ NAME_PREFIX, __func__, ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
return -1;
} else {
useful = IPC_DATA_SIZE;
}
#ifdef USE_IPC_BUF_LOG
- ipc_data->cnt_dbg_wt++;
- CSP_PRINTF_INFO("w: eq:%d, dq:%d, useful:%d\n", eq, dq, useful);
+ CSP_PRINTF_INFO("%s: w: eq:%d, dq:%d, useful:%d\n", NAME_PREFIX, eq, dq, useful);
#endif
/* check length */
if (length + sizeof(u16) > useful) {
CSP_PRINTF_ERROR
- ("%s: no buffer. len:%d, remain:%d eq:%d, dq:%d (eq:%d, dq:%d, f:%d, e:%d)\n",
- __func__, length, useful, eq, dq, ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
+ ("%s: %s: no buffer. len:%d, remain:%d eq:%d, dq:%d (eq:%d, dq:%d, f:%d, e:%d)\n",
+ NAME_PREFIX, __func__, length, useful, eq, dq, ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
return -1;
}
ipc_data->empty = 0;
#ifdef USE_IPC_BUF_LOG
- CSP_PRINTF_INFO("w_out: eq:%d, dq:%d, f:%d, e:%d\n",
- ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
+ CSP_PRINTF_INFO("%s: w_out: eq:%d, dq:%d, f:%d, e:%d\n",
+ NAME_PREFIX, ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
#endif
return 0;
} else {
else if (eq < dq)
useful = (IPC_DATA_SIZE - dq) + eq;
else if (ipc_data->empty) {
- CSP_PRINTF_ERROR("%s is empty (eq:%d, dq:%d, f:%d, e:%d)\n",
- __func__, ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
+ CSP_PRINTF_ERROR("%s: %s is empty (eq:%d, dq:%d, f:%d, e:%d)\n",
+ NAME_PREFIX, __func__, ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
return 0;
} else {
useful = IPC_DATA_SIZE;
dq = 0;
size_to_read = (size_upper << 8) | size_lower;
- if (size_to_read >= PACKET_SIZE_MAX) {
- CSP_PRINTF_ERROR("%s: wrong size:%d\n",
- __func__, size_to_read);
+ if (size_to_read > PACKET_SIZE_MAX) {
+ CSP_PRINTF_ERROR("%s: %s: wrong size:%d\n",
+ NAME_PREFIX, __func__, size_to_read);
return -1;
}
#ifdef USE_IPC_BUF_LOG
- ipc_data->cnt_dbg_rd++;
- CSP_PRINTF_INFO("r: eq:%d, dq:%d, useful:%d, size_to_read:%d\n",
- eq, dq, useful, size_to_read);
+ CSP_PRINTF_INFO("%s: r: eq:%d, dq:%d, useful:%d, size_to_read:%d\n",
+ NAME_PREFIX, eq, dq, useful, size_to_read);
#endif
if (useful < sizeof(u16) + size_to_read) {
- CSP_PRINTF_ERROR("%s: no enough read size: useful:%d, read_to_size:%d,%d (eq:%d, dq:%d, f:%d, e:%d)\n",
- __func__, useful, size_to_read, sizeof(u16),
+ CSP_PRINTF_ERROR("%s: %s: no enough read size: useful:%d, read_to_size:%d,%d (eq:%d, dq:%d, f:%d, e:%d)\n",
+ NAME_PREFIX, __func__, useful, size_to_read, sizeof(u16),
ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
return 0;
}
ipc_data->full = 0;
#ifdef USE_IPC_BUF_LOG
- CSP_PRINTF_INFO("r_out (read_to_size:%d): eq:%d, dq:%d, f:%d, e:%d\n",
- size_to_read, ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
+ CSP_PRINTF_INFO("%s: r_out (read_to_size:%d): eq:%d, dq:%d, f:%d, e:%d\n",
+ NAME_PREFIX, size_to_read, ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
#endif
return size_to_read;
}
evtq = (dir == IPC_DATA_C2A) ? IPC_EVT_C2A : IPC_EVT_A2C;
ret = ipc_add_evt(evtq, IRQ_EVT_CH0);
} else {
- CSP_PRINTF_INFO("%s: error\n", __func__);
+ CSP_PRINTF_INFO("%s: %s: error\n", NAME_PREFIX, __func__);
}
return ret;
}
+/*
+ * Read one packet from the shared buffer into rx (USE_IPC_BUF variant).
+ * Returns the packet length in bytes on success, or -1 when ipc_io_data()
+ * reports empty (0) or an error (negative) — callers should treat any
+ * non-positive value as failure.
+ */
-int ipc_read_data(enum ipc_data_list dir, uint8_t *rx)
+int ipc_read_data(enum ipc_data_list dir, u8 *rx)
{
int ret = ipc_io_data(dir, rx, 0);
if (!ret || (ret < 0)) {
- CSP_PRINTF_INFO("%s: error\n", __func__);
- return 0;
+ CSP_PRINTF_INFO("%s: %s: error\n", NAME_PREFIX, __func__);
+ return -1;
}
return ret;
}
+#endif
void ipc_print_databuf(void)
{
struct ipc_buf *ipc_data = ipc_get_base(IPC_REG_IPC_A2C);
- CSP_PRINTF_INFO("a2c: eq:%d dq:%d full:%d empty:%d tx:%d rx:%d\n",
- ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty,
- ipc_data->cnt_dbg_wt, ipc_data->cnt_dbg_rd);
+ CSP_PRINTF_INFO("%s: a2c: eq:%d dq:%d full:%d empty:%d\n",
+ NAME_PREFIX, ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
ipc_data = ipc_get_base(IPC_REG_IPC_C2A);
- CSP_PRINTF_INFO("c2a: eq:%d dq:%d full:%d empty:%d tx:%d rx:%d\n",
- ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty,
- ipc_data->cnt_dbg_wt, ipc_data->cnt_dbg_rd);
-}
-
-#else
-/* ipc channel functions */
-#define GET_IPC_REG_NAME(c) (((c) == CS_WRITE) ? "W" : (((c) == CS_RECV) ? "R" : "I"))
-#define GET_CH_NAME(c) (((c) == CS_AP) ? "A" : "C")
-#define GET_CH_OWNER(o) (((o) == IPC_DATA_C2A) ? "C2A" : "A2C")
-
-inline void ipc_update_channel_status(struct ipc_content *content,
- enum channel_status next)
-{
-#ifdef PACKET_LOW_DEBUG
- unsigned int org = __raw_readl(&content->status);
-
- CSP_PRINTF_INFO("CH(%s)%d: %s->%s\n", GET_CH_NAME(org >> CS_OWN_OFFSET),
- content->num, GET_IPC_REG_NAME((org & CS_IPC_REG_CMP)),
- GET_IPC_REG_NAME((next & CS_IPC_REG_CMP)));
-#endif
-
- __raw_writel(next, &content->status);
-}
-
-void *ipc_scan_channel(enum ipc_region area, enum channel_status target)
-{
- int i;
- struct ipc_content *content = ipc_get_base(area);
-
- for (i = 0; i < IPC_BUF_NUM; i++, content++)
- if (__raw_readl(&content->status) == target)
- return content;
-
- return NULL;
+ CSP_PRINTF_INFO("%s: c2a: eq:%d dq:%d full:%d empty:%d\n",
+ NAME_PREFIX, ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
}
-void *ipc_get_channel(enum ipc_region area, enum channel_status target,
- enum channel_status next)
-{
- int i;
- struct ipc_content *content = ipc_get_base(area);
-
- for (i = 0; i < IPC_BUF_NUM; i++, content++) {
- if (__raw_readl(&content->status) == target) {
- ipc_update_channel_status(content, next);
- return content;
- }
- }
-
- return NULL;
-}
-
-void ipc_print_channel(void)
-{
- int i, j, org;
-
- for (j = 0; j < IPC_DATA_MAX; j++) {
- for (i = 0; i < IPC_BUF_NUM; i++) {
- org = ipc_map->data[j][i].status;
- if (org & CS_IPC_REG_CMP)
- CSP_PRINTF_INFO("CH-%s:%x\n",
- GET_CH_OWNER(j), org);
- }
- }
-}
-#endif
-
int ipc_check_reset_valid()
{
int i;
for (i = 0; i < IPC_DATA_MAX; i++)
if (map->data[i].dq || map->data[i].eq ||
map->data[i].full || (map->data[i].empty != 1)) {
- CSP_PRINTF_INFO("contexthub: %s: ipc_data_%s invalid: eq:%d, dq:%d, full:%d, empty:%d\n",
- __func__, i ? "a2c" : "c2a",
+ CSP_PRINTF_INFO("%s: %s: ipc_data_%s invalid: eq:%d, dq:%d, full:%d, empty:%d\n",
+ NAME_PREFIX, __func__, i ? "a2c" : "c2a",
map->data[i].eq,
map->data[i].dq,
map->data[i].full,
for (i = 0; i < IPC_EVT_MAX; i++)
if (map->evt[i].ctrl.eq || map->evt[i].ctrl.dq ||
map->evt[i].ctrl.full || (map->evt[i].ctrl.empty != 1)) {
- CSP_PRINTF_INFO("contexthub: %s: ipc_evt_%s invalid: eq:%d, dq:%d, full:%d, empty:%d\n",
- __func__, i ? "a2c" : "c2a",
+ CSP_PRINTF_INFO("%s: %s: ipc_evt_%s invalid: eq:%d, dq:%d, full:%d, empty:%d\n",
+ NAME_PREFIX, __func__, i ? "a2c" : "c2a",
map->evt[i].ctrl.eq,
map->evt[i].ctrl.eq,
map->evt[i].ctrl.full,
if (!ipc_map)
CSP_PRINTF_ERROR("%s: ipc_map is NULL.\n", __func__);
-#ifdef USE_IPC_BUF
for (i = 0; i < IPC_DATA_MAX; i++) {
ipc_map->data[i].eq = 0;
ipc_map->data[i].dq = 0;
ipc_map->data[i].full = 0;
ipc_map->data[i].empty = 1;
- ipc_map->data[i].cnt_dbg_wt = 0;
- ipc_map->data[i].cnt_dbg_rd = 0;
}
-#else
- for (i = 0; i < IPC_BUF_NUM; i++) {
- ipc_map->data[IPC_DATA_C2A][i].num = i;
- ipc_map->data[IPC_DATA_C2A][i].status = CS_CHUB_OWN;
- ipc_map->data[IPC_DATA_A2C][i].num = i;
- ipc_map->data[IPC_DATA_A2C][i].status = CS_AP_OWN;
- }
-#endif
+
ipc_hw_clear_all_int_pend_reg(AP);
struct ipc_evt_buf *cur_evt = NULL;
if (!ipc_evt) {
- CSP_PRINTF_ERROR("%s: invalid ipc_evt\n", __func__);
+ CSP_PRINTF_ERROR("%s: %s: invalid ipc_evt\n", NAME_PREFIX, __func__);
+ return -1;
+ }
+
+#if 0
+ /* check index due to sram corruption */
+ if ((__raw_readl(&ipc_evt->ctrl.eq) > IPC_EVT_NUM) ||
+ (__raw_readl(&ipc_evt->ctrl.dq) > IPC_EVT_NUM) ||
+ (__raw_readl(&ipc_evt->ctrl.full) > 1) ||
+ (__raw_readl(&ipc_evt->ctrl.empty) > 1)) {
+ CSP_PRINTF_ERROR("%s: invalid index: eq:%d, dq:%d, full:%d, empty:%d\n",
+ __func__, ipc_evt->ctrl.eq, ipc_evt->ctrl.dq,
+ ipc_evt->ctrl.full, ipc_evt->ctrl.empty);
return -1;
}
+#endif
if (!__raw_readl(&ipc_evt->ctrl.full)) {
cur_evt = &ipc_evt->data[ipc_evt->ctrl.eq];
} while (ipc_evt->ctrl.full && (trycnt < MAX_TRY_CNT));
if (!__raw_readl(&ipc_evt->ctrl.full)) {
- CSP_PRINTF_INFO("%s: evt %d during %d ms is full\n",
- __func__, evt, EVT_WAIT_TIME * trycnt);
+ CSP_PRINTF_INFO("%s: %s: evt %d during %d ms is full\n",
+ NAME_PREFIX, __func__, evt, EVT_WAIT_TIME * trycnt);
return -1;
} else {
- CSP_PRINTF_ERROR("%s: fail to add evt\n", __func__);
+ CSP_PRINTF_ERROR("%s: %s: fail to add evt\n", NAME_PREFIX, __func__);
return -1;
}
#else
- CSP_PRINTF_ERROR("%s: fail to add evt\n", __func__);
+ CSP_PRINTF_ERROR("%s: %s: fail to add evt\n", NAME_PREFIX, __func__);
return -1;
#endif
}
struct ipc_evt *ipc_evt = &ipc_map->evt[evtq];
int i;
- CSP_PRINTF_INFO("evt-%s: eq:%d dq:%d full:%d irq:%d\n",
- IPC_GET_EVT_NAME(evtq), ipc_evt->ctrl.eq,
+ CSP_PRINTF_INFO("%s: evt-%s: eq:%d dq:%d full:%d irq:%d\n",
+ NAME_PREFIX, IPC_GET_EVT_NAME(evtq), ipc_evt->ctrl.eq,
ipc_evt->ctrl.dq, ipc_evt->ctrl.full,
ipc_evt->ctrl.irq);
for (i = 0; i < IPC_EVT_NUM; i++) {
- CSP_PRINTF_INFO("evt%d(evt:%d,irq:%d,f:%d)\n",
- i, ipc_evt->data[i].evt,
+ CSP_PRINTF_INFO("%s: evt%d(evt:%d,irq:%d,f:%d)\n",
+ NAME_PREFIX, i, ipc_evt->data[i].evt,
ipc_evt->data[i].irq, ipc_evt->data[i].status);
}
#define AP_IPC
#endif
-#define USE_IPC_BUF
-#ifdef USE_IPC_BUF
-#define IPC_VERSION (180611)
-#else
-#define IPC_VERSION (180111)
-#endif
+#define IPC_VERSION (180730)
#if defined(CHUB_IPC)
#if defined(SEOS)
#include <nanohubPacket.h>
-#define PACKET_SIZE_MAX (NANOHUB_PACKET_SIZE_MAX)
#elif defined(EMBOS)
/* TODO: Add embos */
#define SUPPORT_LOOPBACKTEST
#elif defined(AP_IPC)
#if defined(CONFIG_NANOHUB)
#include "comms.h"
-#define PACKET_SIZE_MAX (NANOHUB_PACKET_SIZE_MAX)
#elif defined(CONFIG_CONTEXTHUB_DRV)
// TODO: Add packet size.. #define PACKET_SIZE_MAX ()
#endif
#endif
#ifndef PACKET_SIZE_MAX
-#define PACKET_SIZE_MAX (270)
+#define PACKET_SIZE_MAX (272)
#endif
#ifdef LOWLEVEL_DEBUG
/* contexthub bootargs */
#define BL_OFFSET (0x0)
#define MAP_INFO_OFFSET (256)
+#define MAP_INFO_MAX_SIZE (128)
+#define CHUB_PERSISTBUF_SIZE (96)
+
#define OS_UPDT_MAGIC "Nanohub OS"
#define BOOTMODE_COLD (0x77773333)
IPC_DEBUG_UTC_CHECK_CPU_UTIL,
IPC_DEBUG_UTC_HEAP_DEBUG,
IPC_DEBUG_UTC_HANG,
+ IPC_DEBUG_UTC_HANG_ITMON,
IPC_DEBUG_UTC_IPC_TEST_START,
IPC_DEBUG_UTC_IPC_TEST_END,
IPC_DEBUG_UTC_MAX,
IPC_REG_SHARED,
IPC_REG_RAM,
IPC_REG_LOG,
+ IPC_REG_PERSISTBUF,
IPC_REG_DUMP,
IPC_REG_MAX,
};
CS_MAX = 0xf
};
-/* ipc channel structure */
-struct ipc_content {
- u8 buf[PACKET_SIZE_MAX];
- u32 num;
- u32 size;
- u32 status;
- u32 pad;
-};
-
#define INVAL_CHANNEL (-1)
#if defined(AP_IPC) || defined(EMBOS)
#define IPC_DATA_SIZE (4096)
#endif
+/* one fixed-size packet slot in the per-direction IPC ring */
+struct ipc_channel_buf {
+ u32 size; /* valid payload bytes in buf; writers bound this by PACKET_SIZE_MAX */
+ u8 buf[PACKET_SIZE_MAX];
+};
+
+/* ring depth; one slot is kept free so full and empty are distinguishable */
+#define IPC_CH_BUF_NUM (6)
struct ipc_buf {
volatile u32 eq;
volatile u32 dq;
volatile u32 full;
volatile u32 empty;
- u32 cnt_dbg_rd; /* for debug */
- u32 cnt_dbg_wt; /* for debug */
+#ifdef USE_IPC_BUF
u8 buf[IPC_DATA_SIZE];
+#else
+ struct ipc_channel_buf ch[IPC_CH_BUF_NUM];
+#endif
};
struct ipc_map_area {
- u8 persist_padding[128]; /* persisten base shoud be ipc base */
-#ifdef USE_IPC_BUF
struct ipc_buf data[IPC_DATA_MAX];
-#else
- struct ipc_content data[IPC_DATA_MAX][IPC_BUF_NUM];
-#endif
struct ipc_evt evt[IPC_EVT_MAX];
struct ipc_logbuf logbuf;
};
int ipc_check_reset_valid(void);
void ipc_init(void);
int ipc_hw_read_int_start_index(enum ipc_owner owner);
-void ipc_update_channel_status(struct ipc_content *content,
- enum channel_status next);
-void *ipc_scan_channel(enum ipc_region area, enum channel_status target);
-void *ipc_get_channel(enum ipc_region area, enum channel_status target,
- enum channel_status next);
/* logbuf functions */
void *ipc_get_logbuf(void);
unsigned int ipc_logbuf_get_token(void);
#endif
void ipc_print_databuf(void);
-int ipc_read_data(enum ipc_data_list dir, uint8_t *rx);
+#ifdef USE_IPC_BUF
+int ipc_read_data(enum ipc_data_list dir, u8 *rx);
+#else
+void *ipc_read_data(enum ipc_data_list dir, u32 *len);
+#endif
int ipc_write_data(enum ipc_data_list dir, void *tx, u16 length);
#endif
struct log_kernel_buffer *kernel_buffer,
const char *src, size_t size)
{
+ mm_segment_t old_fs;
+
size_t left_size = SIZE_OF_BUFFER - kernel_buffer->index;
dev_dbg(info->dev, "%s(%zu)\n", __func__, size);
size = SIZE_OF_BUFFER;
}
- if (log_auto_save && !info->filp)
- info->filp = filp_open(info->save_file_name, O_RDWR | O_APPEND | O_CREAT, S_IRWUG);
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ if(log_auto_save) {
+ if (likely(info->file_created)) {
+ info->filp = filp_open(info->save_file_name, O_RDWR | O_APPEND | O_CREAT, S_IRWUG);
+ dev_info(info->dev, "appended to %s\n", info->save_file_name);
+ } else {
+ info->filp = filp_open(info->save_file_name, O_RDWR | O_TRUNC | O_CREAT, S_IRWUG);
+ info->file_created = true;
+ dev_info(info->dev, "created %s\n", info->save_file_name);
+ }
+
+ if (IS_ERR(info->filp)) {
+ dev_warn(info->dev, "%s: saving log fail\n", __func__);
+ goto out;
+ }
+
+ }
if (left_size < size) {
if (info->sram_log_buffer)
}
kernel_buffer->index += size;
+
+out:
+ set_fs(old_fs);
}
void log_flush(struct log_buffer_info *info)
struct log_buffer_info *info;
list_for_each_entry(info, &log_list_head, list) {
- if (info && !contexthub_is_run(dev_get_drvdata(info->dev))) {
+ if (info && !contexthub_get_token(dev_get_drvdata(info->dev), HW_ACCESS)) {
pr_warn("%s: chub isn't run\n", __func__);
return;
}
set_fs(KERNEL_DS);
/* close previous */
- if (info->filp)
- filp_close(info->filp, NULL);
+ if (info->filp && !IS_ERR(info->filp)) {
+ dev_info(info->dev, "%s closing previous file %p\n", __func__, info->filp);
+ filp_close(info->filp, current->files);
+ }
info->filp =
filp_open(info->save_file_name, O_RDWR | O_TRUNC | O_CREAT,
S_IRWUG);
- dev_dbg(info->dev, "%s created\n", info->save_file_name);
+ dev_info(info->dev, "%s created\n", info->save_file_name);
if (IS_ERR(info->filp))
dev_warn(info->dev, "%s: saving log fail\n", __func__);
snprintf(info->save_file_name, sizeof(info->save_file_name),
"%s/nano-%02d-00-%06u.log", CHUB_DBG_DIR, info->id,
(u32)(sched_clock() / NSEC_PER_SEC));
-
chub_log_auto_save_open(info);
log_auto_save = 1;
} else {
log_auto_save = 0;
- filp_close(info->filp, NULL);
+ info->filp = NULL;
}
+
pr_info("%s: %s, %d, %p\n", __func__, info->save_file_name,
log_auto_save, info->filp);
}
err = kstrtol(&buf[0], 10, &event);
if (!err) {
if (!auto_log_flush_ms) {
- err = contexthub_request(ipc);
+ err = contexthub_request(ipc, HW_ACCESS);
if (!err) {
log_flush_all();
contexthub_release(ipc);
{
struct contexthub_ipc_info *ipc = data->pdata->mailbox_client;
- if (!err)
+ if (err >= 0)
ipc->err_cnt[CHUB_ERR_COMMS] = 0;
else {
ipc->err_cnt[CHUB_ERR_COMMS]++;
else if (err == ERROR_BUSY)
ipc->err_cnt[CHUB_ERR_COMMS_BUSY]++;
else
- ipc->err_cnt[CHUB_ERR_COMMS_BUSY]++;
+ ipc->err_cnt[CHUB_ERR_COMMS_UNKNOWN]++;
}
}
#else
int ret;
struct contexthub_ipc_info *ipc = data->pdata->mailbox_client;
- if (atomic_read(&ipc->in_reset)) {
- dev_err(ipc->dev, "%s: chub reset in-progress\n", __func__);
- return ERROR_BUSY;
- }
-
ret = data->comms.write(data, (uint8_t *)&pad->packet, packet_size,
data->comms.timeout_write);
s64 boottime;
struct contexthub_ipc_info *ipc = data->pdata->mailbox_client;
- if (atomic_read(&ipc->in_reset)) {
- packet_free(pad);
- dev_err(ipc->dev, "%s: chub reset in-progress\n", __func__);
- return ERROR_BUSY;
- }
-
if (pad == NULL)
return ERROR_NACK;
}
#elif defined(CONFIG_NANOHUB_MAILBOX)
#ifdef CHUB_RESET_ENABLE
- ret = contexthub_reset(data->pdata->mailbox_client);
+ ret = contexthub_reset(data->pdata->mailbox_client, 0);
#else
ret = -EINVAL;
#endif
return ret < 0 ? ret : count;
#elif defined(CONFIG_NANOHUB_MAILBOX)
- ret = contexthub_download_bl(data->pdata->mailbox_client);
+ ret = contexthub_reset(data->pdata->mailbox_client, 1);
return ret < 0 ? ret : count;
#endif
struct nanohub_data *data = dev_get_nanohub_data(dev);
#ifdef CONFIG_NANOHUB_MAILBOX
- int ret = contexthub_download_kernel(data->pdata->mailbox_client);
+ int ret = contexthub_download_image(data->pdata->mailbox_client, IPC_REG_OS);
return ret < 0 ? ret : count;
#else