static DEFINE_MUTEX(reset_mutex);
static DEFINE_MUTEX(pmu_shutdown_mutex);
-static inline int contexthub_get_token(struct contexthub_ipc_info *ipc, enum access_type acc)
+
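+/*
+ * IPC access token: a caller takes a reference on in_use_ipc before touching
+ * shared IPC memory, and contexthub_reset() waits for the count to drain (or
+ * forces it to zero) before rebooting the CHUB core. Taking a token fails
+ * while a reset is in flight.
+ */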
+static int contexthub_get_token(struct contexthub_ipc_info *ipc)
{
- if (acc == HW_ACCESS) {
- mutex_lock(&pmu_shutdown_mutex);
- dev_info(ipc->dev, "%s: enter shutdown\n", __func__);
- } else if (acc == IPC_ACCESS)
- return (atomic_read(&ipc->chub_status) == CHUB_ST_RUN) && !atomic_read(&ipc->in_reset);
- return true;
+ if (atomic_read(&ipc->in_reset))
+ return -EINVAL;
+
+ atomic_inc(&ipc->in_use_ipc);
+ return 0;
}
-static inline void contexthub_put_token(struct contexthub_ipc_info *ipc, enum access_type acc)
+static void contexthub_put_token(struct contexthub_ipc_info *ipc)
{
- if (acc == HW_ACCESS) {
- dev_info(ipc->dev, "%s: out shutdown\n", __func__);
- mutex_unlock(&pmu_shutdown_mutex);
- }
+ atomic_dec(&ipc->in_use_ipc);
}
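+/*
+ * Sketch of the intended caller pattern (illustrative only, mirroring the
+ * write path below):
+ *
+ *	if (contexthub_get_token(ipc))
+ *		return 0;
+ *	ret = ipc_write_data(IPC_DATA_A2C, tx, (u16)length);
+ *	contexthub_put_token(ipc);
+ */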
/* host interface functions */
}
/* request contexthub to host driver */
-int contexthub_request(struct contexthub_ipc_info *ipc, enum access_type acc)
+int contexthub_request(struct contexthub_ipc_info *ipc)
{
- if (!contexthub_get_token(ipc, acc)) {
- dev_info(ipc->dev, "%s: %s isn't accesable: chub_status: %d, in_reset:%d\n",
- __func__, (acc == HW_ACCESS) ? "hw" : "ipc",
- atomic_read(&ipc->chub_status), atomic_read(&ipc->in_reset));
- return -EINVAL;
- }
-
- if ((acc == HW_ACCESS) || !ipc->powermode)
+ if (!ipc->powermode)
return 0;
#ifdef CONFIG_CHRE_SENSORHUB_HAL
}
/* release contexthub to host driver */
-void contexthub_release(struct contexthub_ipc_info *ipc, enum access_type acc)
+void contexthub_release(struct contexthub_ipc_info *ipc)
{
- contexthub_put_token(ipc, acc);
-
- if ((acc == HW_ACCESS) || !ipc->powermode)
+ if (!ipc->powermode)
return;
#ifdef CONFIG_CHRE_SENSORHUB_HAL
return ipc->chub_alive_lock.flag;
}
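+/* consecutive soft-error count that escalates to CHUB_ST_ERR and a reset */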
+#define CHUB_RESET_THOLD (5)
/* handle errors of chub driver and fw */
static void handle_debug_work(struct contexthub_ipc_info *ipc, enum chub_err_type err)
{
	int need_reset = 0;
int alive = contexthub_lowlevel_alive(ipc);
- /* handle fw dbg */
- if (err == CHUB_ERR_NANOHUB) {
- enum ipc_debug_event event = ipc_read_debug_event(AP);
-
- dev_info(ipc->dev, "%s: fw dbg event:%d\n", __func__, event);
- switch (event) {
- case IPC_DEBUG_CHUB_PRINT_LOG:
- log_flush(ipc->fw_log);
- break;
- default:
- dev_warn(ipc->dev, "Contexthub request invalid event:%d\n", event);
- break;
- };
-
- ipc_write_debug_event(AP, 0);
- return;
- }
-
- /* handle err */
- ipc->err_cnt[err]++;
-
dev_info(ipc->dev, "%s: err:%d, alive:%d, status:%d, in-reset:%d\n",
__func__, err, alive, __raw_readl(&ipc->chub_status),
__raw_readl(&ipc->in_reset));
-
if ((atomic_read(&ipc->chub_status) == CHUB_ST_ERR) || !alive)
need_reset = 1;
- /* dump hw & sram into file */
- chub_dbg_dump_hw(ipc, err);
-
/* reset */
if (need_reset) {
#if defined(CHUB_RESET_ENABLE)
dev_info(ipc->dev, "%s: request silent reset. err:%d, alive:%d, status:%d, in-reset:%d\n",
__func__, err, alive, __raw_readl(&ipc->chub_status),
__raw_readl(&ipc->in_reset));
-
- ret = contexthub_reset(ipc, 0);
+ ret = contexthub_reset(ipc, 0, err);
if (ret)
dev_warn(ipc->dev, "%s: fails to reset:%d. status:%d\n",
__func__, ret, __raw_readl(&ipc->chub_status));
else
- /* TODO: recovery */
dev_info(ipc->dev, "%s: chub reset! should be recovery\n",
__func__);
#else
}
}
-#define CHUB_RESET_THOLD (5)
static void request_debug_work(struct contexthub_ipc_info *ipc,
enum chub_err_type err, bool enable_wq)
{
dev_info(ipc->dev, "%s: err:%d(cnt:%d), enable_wq:%d\n",
__func__, err, ipc->err_cnt[err], enable_wq);
- /* get fw err */
+ if (err < CHUB_ERR_NEED_RESET) {
+ if (ipc->err_cnt[err] > CHUB_RESET_THOLD) {
+ atomic_set(&ipc->chub_status, CHUB_ST_ERR);
+ ipc->err_cnt[err] = 0;
+ dev_info(ipc->dev, "%s: err:%d(cnt:%d), enter error status\n",
+ __func__, err, ipc->err_cnt[err]);
+ } else {
+ ipc->err_cnt[err]++;
+ return;
+ }
+ }
+
+ /* get chub-fw err */
if (err == CHUB_ERR_NANOHUB) {
- enum ipc_debug_event fw_evt = ipc_read_debug_event(AP);
- if (fw_evt == IPC_DEBUG_CHUB_FAULT) {
- err = CHUB_ERR_FW_FAULT;
- ipc_write_debug_event(AP, 0);
+ enum ipc_debug_event fw_evt;
+
+ if (contexthub_get_token(ipc)) {
+ dev_warn(ipc->dev, "%s: get token\n", __func__);
+ return;
}
- else if ((fw_evt == IPC_DEBUG_CHUB_ASSERT) || (fw_evt == IPC_DEBUG_CHUB_ERROR)) {
+ fw_evt = ipc_read_debug_event(AP);
+ if (fw_evt == IPC_DEBUG_CHUB_FAULT)
+ err = CHUB_ERR_FW_FAULT;
+ else if ((fw_evt == IPC_DEBUG_CHUB_ASSERT) || (fw_evt == IPC_DEBUG_CHUB_ERROR))
err = CHUB_ERR_FW_ERROR;
- ipc_write_debug_event(AP, 0);
- }
+ else
+ dev_warn(ipc->dev, "%s: unsupported fw_evt: %d\n", fw_evt);
+
+ ipc_write_debug_event(AP, 0);
+ contexthub_put_token(ipc);
}
/* set status in CHUB_ST_ERR */
if ((err == CHUB_ERR_ITMON) || (err == CHUB_ERR_FW_WDT) || (err == CHUB_ERR_FW_FAULT))
atomic_set(&ipc->chub_status, CHUB_ST_ERR);
- if (err < CHUB_ERR_NEED_RESET)
- if (ipc->err_cnt[err] > CHUB_RESET_THOLD) {
- atomic_set(&ipc->chub_status, CHUB_ST_ERR);
- ipc->err_cnt[err] = 0;
- }
-
/* handle err */
if (enable_wq) {
ipc->cur_err |= (1 << err);
container_of(work, struct contexthub_ipc_info, debug_work);
int i;
- mutex_lock(&dbg_mutex);
dev_info(ipc->dev, "%s: cur_err:0x%x\n", __func__, ipc->cur_err);
for (i = 0; i < CHUB_ERR_MAX; i++) {
if (ipc->cur_err & (1 << i)) {
ipc->cur_err &= ~(1 << i);
}
}
- mutex_unlock(&dbg_mutex);
+}
+
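+/* a successful transaction clears the consecutive-failure count for err */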
+static inline void clear_err_cnt(struct contexthub_ipc_info *ipc, enum chub_err_type err)
+{
+ if (ipc->err_cnt[err])
+ ipc->err_cnt[err] = 0;
}
int contexthub_ipc_read(struct contexthub_ipc_info *ipc, uint8_t *rx, int max_length,
int ret;
void *rxbuf;
- if (!contexthub_get_token(ipc, IPC_ACCESS)) {
- dev_warn(ipc->dev, "no-active: read fails\n");
- return 0;
- }
-
if (!ipc->read_lock.flag) {
spin_lock_irqsave(&ipc->read_lock.event.lock, flag);
read_get_locked(ipc);
ipc->read_lock.flag--;
+ if (contexthub_get_token(ipc)) {
+ dev_warn(ipc->dev, "no-active: read fails\n");
+ return 0;
+ }
+
#ifdef USE_IPC_BUF
rxbuf = ipc->rxbuf;
size = ipc_read_data(IPC_DATA_C2A, rxbuf);
#endif
if (size > 0) {
- contexthub_put_token(ipc, IPC_ACCESS);
- return contexthub_read_process(rx, rxbuf, size);
+ clear_err_cnt(ipc, CHUB_ERR_READ_FAIL);
+ ret = contexthub_read_process(rx, rxbuf, size);
+ } else {
+ ret = 0;
}
+ contexthub_put_token(ipc);
+ return ret;
fail_get_channel:
- contexthub_put_token(ipc, IPC_ACCESS);
request_debug_work(ipc, CHUB_ERR_READ_FAIL, 0);
return -EINVAL;
}
{
int ret;
- if (!contexthub_get_token(ipc, IPC_ACCESS)) {
+ if (contexthub_get_token(ipc)) {
dev_warn(ipc->dev, "no-active: write fails\n");
return 0;
}
ret = ipc_write_data(IPC_DATA_A2C, tx, (u16)length);
+ contexthub_put_token(ipc);
if (ret) {
pr_err("%s: fails to write data: ret:%d, len:%d errcnt:%d\n",
__func__, ret, length, ipc->err_cnt[CHUB_ERR_WRITE_FAIL]);
request_debug_work(ipc, CHUB_ERR_WRITE_FAIL, 0);
length = 0;
+ } else {
+ clear_err_cnt(ipc, CHUB_ERR_WRITE_FAIL);
}
- contexthub_put_token(ipc, IPC_ACCESS);
return length;
}
if (val) {
atomic_set(&ipc->chub_status, CHUB_ST_RUN);
dev_info(ipc->dev, "chub is alive");
+ clear_err_cnt(ipc, CHUB_ERR_CHUB_NO_RESPONSE);
} else {
dev_err(ipc->dev,
"chub isn't alive, should be reset. status:%d\n",
ipc_hw_mask_irq(AP, IRQ_EVT_C2A_INT);
ipc_hw_mask_irq(AP, IRQ_EVT_C2A_INTCLR);
break;
- case MAILBOX_EVT_ERASE_SHARED:
- memset(ipc_get_base(IPC_REG_SHARED), 0, ipc_get_offset(IPC_REG_SHARED));
- break;
default:
need_ipc = 1;
break;
if (!need_ipc)
return ret;
- if (!contexthub_get_token(ipc, IPC_ACCESS)) {
- dev_warn(ipc->dev, "%s event:%d/%d fails chub isn't active\n",
- __func__, event, MAILBOX_EVT_MAX);
+ if (contexthub_get_token(ipc)) {
+ dev_warn(ipc->dev, "%s event:%d/%d fails chub isn't active, status:%d, inreset:%d\n",
+ __func__, event, MAILBOX_EVT_MAX, atomic_read(&ipc->chub_status), atomic_read(&ipc->in_reset));
return -EINVAL;
}
/* handle ipc */
switch (event) {
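+ /* erasing shared memory touches IPC SRAM, so it runs under the token */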
+ case MAILBOX_EVT_ERASE_SHARED:
+ memset(ipc_get_base(IPC_REG_SHARED), 0, ipc_get_offset(IPC_REG_SHARED));
+ break;
case MAILBOX_EVT_DUMP_STATUS:
/* dump nanohub kernel status */
dev_info(ipc->dev, "Request to dump chub fw status\n");
check_rtc_time();
ipc_write_debug_event(AP, (u32)event);
ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_DEBUG);
- return 0;
+ ret = 0;
}
break;
}
-
- contexthub_put_token(ipc, IPC_ACCESS);
+ contexthub_put_token(ipc);
return ret;
}
return ret;
}
-int contexthub_reset(struct contexthub_ipc_info *ipc, bool force_load)
+int contexthub_reset(struct contexthub_ipc_info *ipc, bool force_load, int dump)
{
int ret;
+ int trycnt = 0;
- dev_info(ipc->dev, "%s: force:%d, status:%d, in-reset:%d\n",
- __func__, force_load, atomic_read(&ipc->chub_status), atomic_read(&ipc->in_reset));
+ dev_info(ipc->dev, "%s: force:%d, status:%d, in-reset:%d, user:%d\n",
+ __func__, force_load, atomic_read(&ipc->chub_status), atomic_read(&ipc->in_reset), atomic_read(&ipc->in_use_ipc));
mutex_lock(&reset_mutex);
if (!force_load && (atomic_read(&ipc->chub_status) == CHUB_ST_RUN)) {
mutex_unlock(&reset_mutex);
return 0;
}
atomic_inc(&ipc->in_reset);
-#ifndef CONFIG_CHRE_SENSORHUB_HAL /* retry exam */
-retry:
-#endif
+
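+ /* drain in-flight IPC users; give up and force the reset after
+ * RESET_WAIT_TRY_CNT polls of WAIT_CHUB_MS each */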
+ do {
+ msleep(WAIT_CHUB_MS);
+ if (++trycnt > RESET_WAIT_TRY_CNT) {
+ dev_info(ipc->dev, "%s: cann't get lock. force reset: %d\n", __func__, atomic_read(&ipc->in_use_ipc));
+ break;
+ }
+ dev_info(ipc->dev, "%s: wait for ipc user free: %d\n", __func__, atomic_read(&ipc->in_use_ipc));
+ } while (atomic_read(&ipc->in_use_ipc));
+
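+ /* capture HW/SRAM state before the core goes down when a reason is given */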
+ if (dump)
+ chub_dbg_dump_hw(ipc, dump);
+
dev_info(ipc->dev, "%s: start reset status:%d\n", __func__, atomic_read(&ipc->chub_status));
if (!ipc->block_reset) {
/* core reset */
dev_info(ipc->dev, "%s: chub reseted! (cnt:%d)\n",
__func__, ipc->err_cnt[CHUB_ERR_RESET_CNT]);
ipc->err_cnt[CHUB_ERR_RESET_CNT]++;
+ atomic_set(&ipc->in_use_ipc, 0);
atomic_dec(&ipc->in_reset);
-#ifndef CONFIG_CHRE_SENSORHUB_HAL
- ret = request_wakeup(ipc->data);
- if (ret)
- dev_err(ipc->dev, "%s: fails to wakeup after reset, ret:%d\n", __func__, ret);
- else
- release_wakeup(ipc->data);
- if (ipc->irq_wdt && ipc->irq_wdt_disabled) {
- enable_irq(ipc->irq_wdt);
- ipc->irq_wdt_disabled = 0;
- }
-#endif
}
-
out:
- if (!ret) {
- dev_info(ipc->dev, "%s: chub reseted and work (cnt:%d)\n",
- __func__, ipc->err_cnt[CHUB_ERR_RESET_CNT]);
- ipc->err_cnt[CHUB_ERR_RESET_CNT] = 0;
- } else {
-#ifndef CONFIG_CHRE_SENSORHUB_HAL
- atomic_inc(&ipc->in_reset);
- if (ipc->err_cnt[CHUB_ERR_RESET_CNT] > CHUB_RESET_THOLD)
- msleep(WAIT_TIMEOUT_MS * CHUB_RESET_THOLD);
- else
- msleep(WAIT_TIMEOUT_MS);
- goto retry;
-#endif
- }
mutex_unlock(&reset_mutex);
return ret;
err, start_index, cur_evt, evt, irq_num,
status, ipc_hw_read_int_status_reg(AP),
ipc_hw_read_int_gen_reg(AP));
- ipc->err_cnt[err]++;
ipc_hw_clear_all_int_pend_reg(AP);
- if (ipc->err_cnt[err] > CHUB_RESET_THOLD)
- request_debug_work(ipc, err, 1);
+ request_debug_work(ipc, err, 1);
+ } else {
+ clear_err_cnt(ipc, CHUB_ERR_EVTQ_EMTPY);
+ clear_err_cnt(ipc, CHUB_ERR_EVTQ_NO_HW_TRIGGER);
}
return IRQ_HANDLED;
}
chub_clk_len = of_property_count_strings(node, "clock-names");
of_property_read_string_array(node, "clock-names", string_array, chub_clk_len);
- for (i = 0 ; i < chub_clk_len; i++){
+ for (i = 0; i < chub_clk_len; i++) {
clk = devm_clk_get_and_prepare(dev, string_array[i]);
if (!clk)
return -ENODEV;
const char *buf, size_t count)
{
struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
- int ret = contexthub_reset(ipc, 1);
+ int ret = contexthub_reset(ipc, 1, 0);
return ret < 0 ? ret : count;
}
chub->data->irq1 = IRQ_EVT_A2C_WAKEUP;
chub->data->irq2 = 0;
#endif
-
+ atomic_set(&chub->in_use_ipc, 0);
atomic_set(&chub->chub_status, CHUB_ST_NO_POWER);
atomic_set(&chub->in_reset, 0);
chub->powermode = 0; /* updated by fw bl */
return 0;
}
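+/*
+ * System PM hooks: forward suspend/resume to the nanohub core through the
+ * iio device.
+ */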
+static int contexthub_suspend(struct device *dev)
+{
+ struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
+ struct nanohub_data *data = ipc->data;
+
+ return nanohub_suspend(data->iio_dev);
+}
+
+static int contexthub_resume(struct device *dev)
+{
+ struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
+ struct nanohub_data *data = ipc->data;
+
+ return nanohub_resume(data->iio_dev);
+}
+
+static SIMPLE_DEV_PM_OPS(contexthub_pm_ops, contexthub_suspend, contexthub_resume);
+
static const struct of_device_id contexthub_ipc_match[] = {
{.compatible = "samsung,exynos-nanohub"},
{},
.name = "nanohub-ipc",
.owner = THIS_MODULE,
.of_match_table = contexthub_ipc_match,
- },
+ .pm = &contexthub_pm_ops,
+ },
};
int nanohub_mailbox_init(void)
static struct dbg_dump *p_dbg_dump;
static struct reserved_mem *chub_rmem;
-void chub_dbg_dump_gpr(struct contexthub_ipc_info *ipc)
+static void chub_dbg_dump_gpr(struct contexthub_ipc_info *ipc)
{
if (p_dbg_dump) {
int i;
struct dbg_dump *p_dump = p_dbg_dump;
- if (contexthub_request(ipc, HW_ACCESS)) {
- pr_err("%s: fails to contexthub_request\n", __func__);
- return;
- }
-
IPC_HW_WRITE_DUMPGPR_CTRL(ipc->chub_dumpgrp, 0x1);
/* dump GPR */
for (i = 0; i <= GPR_PC_INDEX - 1; i++)
for (i = 0; i <= GPR_PC_INDEX; i++)
pr_info("R%d: 0x%x\n", i, p_dump->gpr[i]);
-
- contexthub_release(ipc, HW_ACCESS);
}
}
#endif
/* dump hw into dram (chub reserved mem) */
-void chub_dbg_dump_ram(struct contexthub_ipc_info *ipc, enum chub_err_type reason)
+static void chub_dbg_dump_ram(struct contexthub_ipc_info *ipc, enum chub_err_type reason)
{
if (p_dbg_dump) {
- if (contexthub_request(ipc, HW_ACCESS)) {
- pr_err("%s: fails to contexthub_request\n", __func__);
- return;
- }
-
p_dbg_dump->time = sched_clock();
p_dbg_dump->reason = reason;
memcpy_fromio(&p_dbg_dump->sram[p_dbg_dump->sram_start],
ipc_get_base(IPC_REG_DUMP),
ipc_get_chub_mem_size());
-
- contexthub_release(ipc, HW_ACCESS);
}
}
__func__, i, dbg_name[i], ipc->err_cnt[i]);
}
- if (contexthub_request(ipc, IPC_ACCESS)) {
- pr_err("%s: fails to request contexthub. \n", __func__);
- return;
- }
-
- /* dump nanohub kernel status */
+#ifdef USE_FW_DUMP
contexthub_ipc_write_event(ipc, MAILBOX_EVT_DUMP_STATUS);
+#endif
log_flush(ipc->fw_log);
- contexthub_release(ipc, IPC_ACCESS);
}
void chub_dbg_dump_hw(struct contexthub_ipc_info *ipc, enum chub_err_type reason)
dev_dbg(dev, "%s(%lld, %zu)\n", __func__, off, size);
- if (contexthub_request(dev_get_drvdata(dev), HW_ACCESS)) {
- pr_warn("%s: chub isn't run\n", __func__);
- return -EINVAL;
- }
-
memcpy_fromio(buf, battr->private + off, size);
- contexthub_release(dev_get_drvdata(dev), HW_ACCESS);
return size;
}
dev_info(ipc->dev, "%s: event:%d\n", __func__, event);
if (!err) {
- if (contexthub_request(ipc, IPC_ACCESS)) {
- pr_err("%s: fails to request contexthub\n", __func__);
- return 0;
- }
-
contexthub_ipc_write_event(ipc, event);
- contexthub_release(ipc, IPC_ACCESS);
return count;
} else {
return 0;
return -EINVAL;
}
- if (contexthub_request(ipc, IPC_ACCESS)) {
- dev_err(ipc->dev, "%s: fails to request contexthub. ret:%d\n", __func__);
- return -EINVAL;
- }
-
ret = contexthub_ipc_write_event(ipc, (u32)IPC_DEBUG_UTC_IPC_TEST_START);
if (ret) {
dev_err(ipc->dev, "%s: fails to set start test event. ret:%d\n", __func__, ret);
count = ret;
}
- contexthub_release(ipc, IPC_ACCESS);
return count;
}
return ret;
if (event)
- ret = contexthub_request(ipc, IPC_ACCESS);
+ ret = contexthub_request(ipc);
else
- contexthub_release(ipc, IPC_ACCESS);
+ contexthub_release(ipc);
return ret ? ret : count;
}
ktime_t wakeup_ktime;
#ifdef CONFIG_NANOHUB_MAILBOX
unsigned long flag;
+
spin_lock_irqsave(&data->wakeup_wait.lock, flag);
#else
spin_lock(&data->wakeup_wait.lock);
if (ktime_to_ns(ktime_delta) > WAKEUP_ERR_TIME_NS
&& data->wakeup_err_cnt > WAKEUP_ERR_CNT) {
mcu_wakeup_gpio_put_locked(data, priority_lock);
+#ifdef CONFIG_NANOHUB_MAILBOX
+ spin_unlock_irqrestore(&data->wakeup_wait.lock, flag);
+#else
spin_unlock(&data->wakeup_wait.lock);
+#endif
dev_info(sensor_dev,
"wakeup: hard reset due to consistent error\n");
ret = nanohub_hw_reset(data);
{
bool done;
bool priority_lock = lock_mode > LOCK_MODE_NORMAL;
-
#ifdef CONFIG_NANOHUB_MAILBOX
unsigned long flag;
+
spin_lock_irqsave(&data->wakeup_wait.lock, flag);
#else
spin_lock(&data->wakeup_wait.lock);
return ret;
}
+#ifdef CONFIG_EXT_CHUB
static void __nanohub_interrupt_cfg(struct nanohub_data *data,
u8 interrupt, bool mask)
{
interrupt, ret, mask_ret);
} while ((ret != 1 || mask_ret != 1) && --cnt > 0);
}
-
+#endif
static inline void nanohub_mask_interrupt(struct nanohub_data *data,
u8 interrupt)
{
+#ifdef CONFIG_EXT_CHUB
__nanohub_interrupt_cfg(data, interrupt, true);
+#endif
}
static inline void nanohub_unmask_interrupt(struct nanohub_data *data,
u8 interrupt)
{
+#ifdef CONFIG_EXT_CHUB
__nanohub_interrupt_cfg(data, interrupt, false);
+#endif
}
static ssize_t nanohub_wakeup_query(struct device *dev,
SUSPEND_TIMEOUT_MS : WAKEUP_TIMEOUT_MS,
KEY_WAKEUP_LOCK, mode);
if (ret < 0) {
+#ifdef CONFIG_EXT_CHUB
if (data->irq2)
enable_irq(data->irq2);
else
nanohub_unmask_interrupt(data, 2);
+#endif
return ret;
}
release_wakeup_ex(data, KEY_WAKEUP_LOCK, mode);
return ret;
}
-#endif
if (mode != LOCK_MODE_SUSPEND_RESUME)
disable_irq(data->irq1);
+#else
+ if (mode != LOCK_MODE_SUSPEND_RESUME)
+ contexthub_ipc_write_event(data->pdata->mailbox_client, (u32)MAILBOX_EVT_DISABLE_IRQ);
+#endif
atomic_set(&data->lock_mode, mode);
mcu_wakeup_gpio_set_value(data, mode != LOCK_MODE_IO_BL);
int mode = atomic_read(&data->lock_mode);
atomic_set(&data->lock_mode, LOCK_MODE_NONE);
+#ifdef CONFIG_EXT_CHUB
if (mode != LOCK_MODE_SUSPEND_RESUME)
enable_irq(data->irq1);
-#ifdef CONFIG_EXT_CHUB
if (mode == LOCK_MODE_IO || mode == LOCK_MODE_IO_BL)
nanohub_bl_close(data);
-#endif
if (data->irq2)
enable_irq(data->irq2);
+
release_wakeup_ex(data, KEY_WAKEUP_LOCK, mode);
if (!data->irq2)
nanohub_unmask_interrupt(data, 2);
+#else
+ if (mode != LOCK_MODE_SUSPEND_RESUME)
+ contexthub_ipc_write_event(data->pdata->mailbox_client, (u32)MAILBOX_EVT_ENABLE_IRQ);
+ release_wakeup_ex(data, KEY_WAKEUP_LOCK, mode);
+#endif
+
nanohub_notify_thread(data);
return mode;
}
#elif defined(CONFIG_NANOHUB_MAILBOX)
#ifdef CHUB_RESET_ENABLE
- ret = contexthub_reset(data->pdata->mailbox_client, 0);
+ ret = contexthub_reset(data->pdata->mailbox_client, 0, CHUB_ERR_COMMS);
#else
ret = -EINVAL;
#endif
{
struct nanohub_data *data = dev_get_nanohub_data(dev);
-#ifdef CONFIG_EXT_CHUB
+#if defined(CONFIG_EXT_CHUB)
uint8_t status = CMD_ACK;
int ret;
{
struct nanohub_data *data = dev_get_nanohub_data(dev);
int ret;
-
#ifdef CONFIG_EXT_CHUB
const struct nanohub_platform_data *pdata = data->pdata;
const struct firmware *fw_entry;
return ret < 0 ? ret : count;
#elif defined(CONFIG_NANOHUB_MAILBOX)
- ret = contexthub_reset(data->pdata->mailbox_client, 1);
+ ret = contexthub_reset(data->pdata->mailbox_client, 1, 0);
return ret < 0 ? ret : count;
#endif
gpio_set_value(pdata->nreset_gpio, 1);
usleep_range(650000, 700000);
+
+#ifdef CONFIG_EXT_CHUB
enable_irq(data->irq1);
if (data->irq2)
enable_irq(data->irq2);
else
nanohub_unmask_interrupt(data, 2);
+#else
+ contexthub_ipc_write_event(data->pdata->mailbox_client, (u32)MAILBOX_EVT_ENABLE_IRQ);
+#endif
return 0;
#endif
}
#endif
- ret = request_wakeup_timeout(data, WAKEUP_TIMEOUT_MS);
+ ret = request_wakeup_timeout(data, 500);
if (ret)
return ret;
static const struct sched_param param = {
.sched_priority = (MAX_USER_RT_PRIO/2)-1,
};
-#ifdef CONFIG_NANOHUB_MAILBOX
-#ifndef CHUB_RESET_ENABLE
- struct contexthub_ipc_info *ipc;
-#endif
-#endif
data->kthread_err_cnt = 0;
sched_setscheduler(current, SCHED_FIFO, ¶m);
buf = nanohub_io_get_buf(&data->free_pool,
false);
if (buf) {
- ret = request_wakeup_timeout(data, WAKEUP_TIMEOUT_MS);
+ ret = request_wakeup_timeout(data, 600);
if (ret) {
dev_info(sensor_dev,
"%s: request_wakeup_timeout: ret=%d\n",
int nanohub_suspend(struct iio_dev *iio_dev)
{
-#ifdef CONFIG_EXT_CHUB
+#if defined(CONFIG_EXT_CHUB)
struct nanohub_data *data = iio_priv(iio_dev);
int ret;
}
return ret;
-#else
+#elif defined(CONFIG_NANOHUB_MAILBOX)
(void)iio_dev;
return 0;
int nanohub_resume(struct iio_dev *iio_dev)
{
-#ifdef CONFIG_EXT_CHUB
struct nanohub_data *data = iio_priv(iio_dev);
+#if defined(CONFIG_EXT_CHUB)
disable_irq_wake(data->irq1);
nanohub_wakeup_unlock(data);
-#else
- (void)iio_dev;
-
- return 0;
+#elif defined(CONFIG_NANOHUB_MAILBOX)
+ nanohub_notify_thread(data);
#endif
+ return 0;
}
static int __init nanohub_init(void)