if STAGING
+source "drivers/staging/nanohub/Kconfig"
source "drivers/staging/irda/net/Kconfig"
source "drivers/staging/wlan-ng/Kconfig"
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_PI433) += pi433/
obj-$(CONFIG_SEC_EXT) += samsung/
+obj-$(CONFIG_NANOHUB) += nanohub/
config NANOHUB
tristate "Nanohub"
default N
+ select IIO
help
Enable support for the nanohub sensorhub driver.
config NANOHUB_SPI
bool "Nanohub SPI"
- default Y
+ default N
help
Enable nanohub SPI support.
If in doubt, say N here.
+config NANOHUB_MAILBOX
+ bool "Nanohub Mailbox"
+	default y
+ help
+ Enable nanohub Mailbox support.
+
+	  Either this or NANOHUB_SPI should be selected.
+
+ If in doubt, say N here.
+
+config CHRE_SENSORHUB_HAL
+ bool "CHRE sensor Hal support"
+	default y
+ help
+ Enable chre sensorhub hal support.
+
+config CONTEXTHUB_DEBUG
+ bool "Nanohub debug"
+	default n
+ help
+	  Enable nanohub device debug messages
+
endif # NANOHUB
# Makefile for nanohub
#
+ccflags-$(CONFIG_CONTEXTHUB_DEBUG) := -DDEBUG
obj-$(CONFIG_NANOHUB) += nanohub.o
-nanohub-y := main.o bl.o comms.o
-nanohub-$(CONFIG_NANOHUB_SPI) += spi.o
-nanohub-$(CONFIG_NANOHUB_I2C) += i2c.o
+nanohub-$(CONFIG_CHRE_SENSORHUB_HAL) := main.o comms.o
+nanohub-$(CONFIG_NANOHUB_SPI) += spi.o bl.o
+nanohub-$(CONFIG_NANOHUB_I2C) += i2c.o bl.o
+nanohub-$(CONFIG_NANOHUB_MAILBOX) += chub.o chub_ipc.o chub_log.o chub_dbg.o
--- /dev/null
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Boojin Kim <boojin.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/iio/iio.h>
+#include <linux/wakelock.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/random.h>
+#include <linux/rtc.h>
+#include <linux/clk.h>
+#include <linux/timekeeping.h>
+
+#ifdef CONFIG_CHRE_SENSORHUB_HAL
+#include "main.h"
+#endif
+#include "bl.h"
+#include "comms.h"
+#include "chub.h"
+#include "chub_ipc.h"
+#include "chub_dbg.h"
+
+#define WAIT_TRY_CNT (3)
+#define WAIT_TIMEOUT_MS (1000)
+enum { CHUB_ON, CHUB_OFF };
+enum { C2A_ON, C2A_OFF };
+
+/* host interface functions */
+/*
+ * Report whether the contexthub link is currently active.
+ * With the CHRE HAL built in this returns the nanohub IRQ1 state;
+ * otherwise it unconditionally reports "running".
+ */
+int contexthub_is_run(struct contexthub_ipc_info *ipc)
+{
+#ifdef CONFIG_CHRE_SENSORHUB_HAL
+	return nanohub_irq1_fired(ipc->data);
+#else
+	return 1;
+#endif
+}
+
+/* request contexthub to host driver */
+/*
+ * Take a wakeup reference on the contexthub via the nanohub host driver,
+ * waiting up to WAIT_TIMEOUT_MS. Returns 0 immediately when no CHRE HAL
+ * is configured. Pair with contexthub_release().
+ */
+int contexthub_request(struct contexthub_ipc_info *ipc)
+{
+#ifdef CONFIG_CHRE_SENSORHUB_HAL
+	return request_wakeup_timeout(ipc->data, WAIT_TIMEOUT_MS);
+#else
+	return 0;
+#endif
+}
+
+/* release contexthub back to the host driver */
+void contexthub_release(struct contexthub_ipc_info *ipc)
+{
+#ifdef CONFIG_CHRE_SENSORHUB_HAL
+	/* drop the wakeup reference taken by contexthub_request() */
+	release_wakeup(ipc->data);
+#endif
+}
+
+/* Forward a chub->AP interrupt (IRQ1) to the nanohub host driver. */
+static inline void contexthub_notify_host(struct contexthub_ipc_info *ipc)
+{
+#ifdef CONFIG_CHRE_SENSORHUB_HAL
+	nanohub_handle_irq1(ipc->data);
+#else
+	/* TODO */
+#endif
+}
+
+#ifdef CONFIG_CHRE_SENSORHUB_HAL
+/* by nanohub kernel RxBufStruct. packet header is 10 + 2 bytes to align */
+struct rxbuf {
+ u8 pad;
+ u8 pre_preamble;
+ u8 buf[PACKET_SIZE_MAX];
+ u8 post_preamble;
+};
+
+static int nanohub_mailbox_open(void *data)
+{
+ return 0;
+}
+
+static void nanohub_mailbox_close(void *data)
+{
+ (void)data;
+}
+
+static int nanohub_mailbox_write(void *data, uint8_t *tx, int length,
+ int timeout)
+{
+ struct nanohub_data *ipc = data;
+
+ return contexthub_ipc_write(ipc->pdata->mailbox_client, tx, length, timeout);
+}
+
+static int nanohub_mailbox_read(void *data, uint8_t *rx, int max_length,
+ int timeout)
+{
+ struct nanohub_data *ipc = data;
+
+ return contexthub_ipc_read(ipc->pdata->mailbox_client, rx, max_length, timeout);
+}
+
+void nanohub_mailbox_comms_init(struct nanohub_comms *comms)
+{
+ comms->seq = 1;
+ comms->timeout_write = 544;
+ comms->timeout_ack = 272;
+ comms->timeout_reply = 512;
+ comms->open = nanohub_mailbox_open;
+ comms->close = nanohub_mailbox_close;
+ comms->write = nanohub_mailbox_write;
+ comms->read = nanohub_mailbox_read;
+}
+#endif
+
+/*
+ * Copy a received message out of the shared buffer into rx.
+ * With the CHRE HAL the raw buffer is interpreted as a nanohub packet
+ * (header starts at pre_preamble) and the exact packet length is
+ * returned; otherwise the raw size is passed through unchanged.
+ */
+static int contexthub_read_process(uint8_t *rx, u8 *raw_rx, u32 size)
+{
+#ifdef CONFIG_CHRE_SENSORHUB_HAL
+	struct rxbuf *rxstruct;
+	struct nanohub_packet *packet;
+
+	rxstruct = (struct rxbuf *)raw_rx;
+	packet = (struct nanohub_packet *)&rxstruct->pre_preamble;
+	/* shared area may be device memory: use memcpy_fromio */
+	memcpy_fromio(rx, (void *)packet, size);
+
+	return NANOHUB_PACKET_SIZE(packet->len);
+#else
+	return size;
+#endif
+}
+
+static int contexthub_ipc_drv_init(struct contexthub_ipc_info *chub)
+{
+ struct device *chub_dev = chub->dev;
+ int i;
+
+ chub->ipc_map = ipc_get_chub_map();
+ if (!chub->ipc_map)
+ return -EINVAL;
+
+ /* init debug-log */
+ /* HACK for clang */
+ chub->ipc_map->logbuf.eq = 0;
+ chub->ipc_map->logbuf.dq = 0;
+ chub->fw_log = log_register_buffer(chub_dev, 0,
+ (void *)&chub->ipc_map->logbuf.eq,
+ "fw", 1);
+ if (!chub->fw_log)
+ return -EINVAL;
+
+#ifdef LOWLEVEL_DEBUG
+ chub->dd_log_buffer = vmalloc(SZ_256K + sizeof(struct LOG_BUFFER *));
+ chub->dd_log_buffer->index_reader = 0;
+ chub->dd_log_buffer->index_writer = 0;
+ chub->dd_log_buffer->size = SZ_256K;
+ chub->dd_log =
+ log_register_buffer(chub_dev, 1, chub->dd_log_buffer, "dd", 0);
+#endif
+ chub_dbg_init(chub_dev);
+
+ /* chub err init */
+ for (i = 0; i < CHUB_ERR_MAX; i++)
+ chub->err_cnt[i] = 0;
+
+ dev_info(chub_dev,
+ "IPC map information\n\tinfo(base:%p size:%zu)\n\tipc(base:%p size:%zu)\n\tlogbuf(base:%p size:%d)\n",
+ chub, sizeof(struct contexthub_ipc_info),
+ ipc_get_base(IPC_REG_IPC), sizeof(struct ipc_map_area),
+ ipc_get_base(IPC_REG_LOG), chub->ipc_map->logbuf.size);
+
+ return 0;
+}
+
+/*
+ * Error recovery: dump contexthub state for @err, then shut the hub down
+ * and reset it. The reset is only attempted when the shutdown succeeded.
+ */
+static void chub_dump_and_reset(struct contexthub_ipc_info *ipc,
+				enum CHUB_ERR_TYPE err)
+{
+	int ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_DUMP_STATUS);
+
+	if (ret)
+		dev_dbg(ipc->dev, "%s: fails to dump\n", __func__);
+
+	chub_dbg_dump_hw(ipc, err);
+
+	/* reset chub */
+	ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_SHUTDOWN);
+	if (!ret) {
+#ifdef DEBUG_IMAGE
+		chub_dbg_check_and_download_image(ipc);
+#endif
+		dev_dbg(ipc->dev, "%s to be reset\n", __func__);
+		contexthub_ipc_write_event(ipc, MAILBOX_EVT_RESET);
+		/* give the hub time to come back before callers proceed */
+		mdelay(100);
+	} else {
+		dev_warn(ipc->dev, "%s: fail to shutdown contexthub\n", __func__);
+	}
+}
+
+#ifdef PACKET_LOW_DEBUG
+static void debug_dumpbuf(unsigned char *buf, int len)
+{
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, 16, 1, buf, len,
+ false);
+}
+#endif
+
+/*
+ * Pick the oldest pending receive channel: scan the per-channel arrival
+ * order stamps and return the index with the smallest non-zero value,
+ * clearing its stamp. Returns INVAL_CHANNEL when nothing is pending.
+ */
+static inline int get_recv_channel(struct recv_ctrl *recv)
+{
+	int i;
+	unsigned long min_order = 0;
+	int min_order_evt = INVAL_CHANNEL;
+
+	for (i = 0; i < IPC_BUF_NUM; i++) {
+		if (recv->container[i]) {
+			if (!min_order) {
+				/* first pending channel seen */
+				min_order = recv->container[i];
+				min_order_evt = i;
+			} else if (recv->container[i] < min_order) {
+				min_order = recv->container[i];
+				min_order_evt = i;
+			}
+		}
+	}
+
+	if (min_order_evt != INVAL_CHANNEL)
+		recv->container[min_order_evt] = 0;
+
+	return min_order_evt;
+}
+
+static inline bool read_is_locked(struct contexthub_ipc_info *ipc)
+{
+ return atomic_read(&ipc->read_lock.cnt) != 0;
+}
+
+static inline void read_get_locked(struct contexthub_ipc_info *ipc)
+{
+ atomic_inc(&ipc->read_lock.cnt);
+}
+
+static inline void read_put_unlocked(struct contexthub_ipc_info *ipc)
+{
+ atomic_dec(&ipc->read_lock.cnt);
+}
+
+/*
+ * Deliver one contexthub->AP message into rx.
+ *
+ * Waits up to @timeout ms (interruptible) for the mailbox IRQ handler to
+ * flag pending data via read_lock.flag, then copies the payload out with
+ * contexthub_read_process(). Returns the delivered length, or -EINVAL
+ * after bumping err_cnt[CHUB_ERR_READ_FAIL] and scheduling debug_work.
+ *
+ * NOTE(review): @max_length is not used to bound the copy here — the
+ * size comes from the IPC buffer/channel; confirm callers size rx for
+ * the largest possible packet.
+ */
+int contexthub_ipc_read(struct contexthub_ipc_info *ipc, uint8_t *rx, int max_length,
+			int timeout)
+{
+	unsigned long flag;
+	int ret;
+#ifdef USE_IPC_BUF
+	/* in this mode read_lock.flag counts pending messages */
+	int size = 0;
+	int lock;	/* NOTE(review): unused in this path */
+	struct ipc_buf *ipc_buf = ipc_get_base(IPC_REG_IPC_C2A);	/* NOTE(review): unused */
+
+	if (!ipc->read_lock.flag) {
+		/* nothing pending: sleep until the IRQ handler flags data */
+		spin_lock_irqsave(&ipc->read_lock.event.lock, flag);
+		read_get_locked(ipc);
+		ret =
+		    wait_event_interruptible_timeout_locked(ipc->read_lock.event,
+							    ipc->read_lock.flag,
+							    msecs_to_jiffies(timeout));
+		read_put_unlocked(ipc);
+		spin_unlock_irqrestore(&ipc->read_lock.event.lock, flag);
+		if (ret < 0)
+			dev_warn(ipc->dev,
+				 "fails to get read ret:%d timeout:%d, flag:0x%x",
+				 ret, timeout, ipc->read_lock.flag);
+
+		if (!ipc->read_lock.flag)
+			goto fail_get_channel;
+	}
+
+	/* consume one pending-message credit and drain the buffer */
+	ipc->read_lock.flag--;
+	size = ipc_read_data(IPC_DATA_C2A, ipc->rxbuf);
+	if (size)
+		return contexthub_read_process(rx, ipc->rxbuf, size);
+#else
+	/* channel mode: read_lock.flag is a per-channel pending bitmask */
+	struct ipc_content *content;
+	int ch = INVAL_CHANNEL;
+
+	if (ipc->read_lock.flag) {
+search_channel:
+		ch = get_recv_channel(&ipc->recv_order);
+
+		if (ch == INVAL_CHANNEL)
+			goto fail_get_channel;
+		else
+			ipc->read_lock.flag &= ~(1 << ch);
+	} else {
+		spin_lock_irqsave(&ipc->read_lock.event.lock, flag);
+		read_get_locked(ipc);
+		ret =
+		    wait_event_interruptible_timeout_locked(ipc->read_lock.event,
+							    ipc->read_lock.flag,
+							    msecs_to_jiffies(timeout));
+		read_put_unlocked(ipc);
+		spin_unlock_irqrestore(&ipc->read_lock.event.lock, flag);
+		if (ret < 0)
+			dev_warn(ipc->dev,
+				 "fails to get read ret:%d timeout:%d, flag:0x%x",
+				 ret, timeout, ipc->read_lock.flag);
+
+		if (ipc->read_lock.flag)
+			goto search_channel;
+		else
+			goto fail_get_channel;
+	}
+
+	/* hand the channel back to the hub after copying out */
+	content = ipc_get_addr(IPC_REG_IPC_C2A, ch);
+	ipc->recv_order.container[ch] = 0;
+	ipc_update_channel_status(content, CS_CHUB_OWN);
+
+	return contexthub_read_process(rx, content->buf, content->size);
+#endif
+
+fail_get_channel:
+	ipc->err_cnt[CHUB_ERR_READ_FAIL]++;
+	dev_err(ipc->dev,
+		"%s: fails to get data errcnt:%d\n",
+		__func__, ipc->err_cnt[CHUB_ERR_READ_FAIL]);
+	schedule_work(&ipc->debug_work);
+	return -EINVAL;
+}
+
+/*
+ * Queue an AP->contexthub message of @length bytes.
+ * Returns the number of bytes queued; on failure returns 0 (buffered
+ * path) or -EINVAL (channel path when no free channel exists).
+ * NOTE(review): @timeout is currently unused in both paths.
+ */
+int contexthub_ipc_write(struct contexthub_ipc_info *ipc,
+			 uint8_t *tx, int length, int timeout)
+{
+#ifdef USE_IPC_BUF
+	int ret;
+
+	ret = ipc_write_data(IPC_DATA_A2C, tx, (u16)length);
+	if (ret) {
+		/* write failure is accounted and handed to the debug worker */
+		ipc->err_cnt[CHUB_ERR_WRITE_FAIL]++;
+		pr_err("%s: fails to write data: ret:%d, len:%d errcnt:%d\n",
+		       __func__, ret, length, ipc->err_cnt[CHUB_ERR_WRITE_FAIL]);
+		schedule_work(&ipc->debug_work);
+		length = 0;
+	}
+	return length;
+#else
+	/* claim a free A2C channel and mark it as being written by the AP */
+	struct ipc_content *content =
+	    ipc_get_channel(IPC_REG_IPC_A2C, CS_IDLE, CS_AP_WRITE);
+
+	if (!content) {
+		pr_err("%s: fails to get channel.\n", __func__);
+		ipc_print_channel();
+
+		return -EINVAL;
+	}
+	content->size = length;
+	memcpy_toio(content->buf, tx, length);
+
+	DEBUG_PRINT(KERN_DEBUG, "->W%d\n", content->num);
+	/* event-queue full: ping the hub and report nothing written */
+	if (ipc_add_evt(IPC_EVT_A2C, content->num)) {
+		contexthub_ipc_write_event(ipc, MAILBOX_EVT_CHUB_ALIVE);
+		length = 0;
+	}
+#endif
+	return length;
+}
+
+/*
+ * Compare the CHUB RTC against the AP RTC and re-sync the CHUB RTC to AP
+ * time when they drift apart.
+ *
+ * Fixes vs. original: NULL-check rtc_class_open() results (it returns
+ * NULL when the device isn't registered), drop the rtc_class_open()
+ * reference afterwards (was leaked on every call), read each RTC into
+ * its correspondingly-named buffer (the reads were swapped, which made
+ * the rtc_set_time() re-sync a no-op), use %lld for the signed time64_t
+ * diff, and remove the dead trailing rtc_tm_to_time64() assignments.
+ */
+static void check_rtc_time(void)
+{
+	struct rtc_device *chub_rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE);
+	struct rtc_device *ap_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+	struct rtc_time chub_tm, ap_tm;
+	time64_t diff;
+
+	if (!chub_rtc || !ap_rtc) {
+		pr_warn("nanohub %s: rtc device not available\n", __func__);
+		goto out;
+	}
+
+	rtc_read_time(ap_rtc, &ap_tm);
+	rtc_read_time(chub_rtc, &chub_tm);
+
+	diff = rtc_tm_sub(&chub_tm, &ap_tm);
+	if (diff) {
+		pr_info("nanohub %s: diff_time: %lld\n", __func__, diff);
+		rtc_set_time(chub_rtc, &ap_tm);
+	}
+
+out:
+	if (ap_rtc)
+		rtc_class_close(ap_rtc);
+	if (chub_rtc)
+		rtc_class_close(chub_rtc);
+}
+
+/*
+ * Poll until the contexthub reports CHUB_ST_RUN: sleep WAIT_TIMEOUT_MS,
+ * re-send a CHUB_ALIVE ping, and retry up to WAIT_TRY_CNT times.
+ * Returns 0 when running, -ETIMEDOUT otherwise.
+ */
+static int contexthub_wait_alive(struct contexthub_ipc_info *ipc)
+{
+	int trycnt = 0;
+
+	do {
+		msleep(WAIT_TIMEOUT_MS);
+		contexthub_ipc_write_event(ipc, MAILBOX_EVT_CHUB_ALIVE);
+		if (++trycnt > WAIT_TRY_CNT)
+			break;
+	} while ((atomic_read(&ipc->chub_status) != CHUB_ST_RUN));
+
+	if (atomic_read(&ipc->chub_status) == CHUB_ST_RUN) {
+		return 0;
+	} else {
+		dev_warn(ipc->dev, "%s fails. contexthub status is %d\n",
+			 __func__, atomic_read(&ipc->chub_status));
+		return -ETIMEDOUT;
+	}
+}
+
+/*
+ * Bring the contexthub out of reset for @event and wait until it reports
+ * alive. Re-initializes the IPC area, clears wakeup/interrupt/read-lock
+ * bookkeeping, programs boot mode and clock, then runs the SoC-specific
+ * power-on / system-reset / core-reset register sequence.
+ *
+ * Fix vs. original: the fallback branch of MAILBOX_EVT_SYSTEM_RESET
+ * called dev_warn() without the struct device * argument, which does not
+ * compile on SoCs other than EXYNOS9810/9610.
+ */
+static int contexthub_hw_reset(struct contexthub_ipc_info *ipc,
+			       enum mailbox_event event)
+{
+	u32 val;
+	int trycnt = 0;
+
+	/* clear ipc value */
+	ipc_init();
+
+	atomic_set(&ipc->wakeup_chub, CHUB_OFF);
+	atomic_set(&ipc->irq1_apInt, C2A_OFF);
+	atomic_set(&ipc->read_lock.cnt, 0x0);
+
+	ipc->read_lock.flag = 0;
+#ifndef USE_IPC_BUF
+	ipc->recv_order.order = 0;
+	for (val = 0; val < IRQ_EVT_CH_MAX; val++)
+		ipc->recv_order.container[val] = 0;
+#endif
+	ipc_hw_write_shared_reg(AP, ipc->os_load, SR_BOOT_MODE);
+	ipc_set_chub_clk((u32)ipc->clkrate);
+	ipc_set_chub_bootmode(BOOTMODE_COLD);
+
+	switch (event) {
+	case MAILBOX_EVT_POWER_ON:
+#ifdef NEED_TO_RTC_SYNC
+		check_rtc_time();
+#endif
+		if (atomic_read(&ipc->chub_status) == CHUB_ST_NO_POWER) {
+			atomic_set(&ipc->chub_status, CHUB_ST_POWER_ON);
+
+			/* enable Dump GPR */
+			IPC_HW_WRITE_DUMPGPR_CTRL(ipc->chub_dumpgrp, 0x1);
+
+#if defined(CONFIG_SOC_EXYNOS9610)
+			/* cmu cm4 clock - gating */
+			val = __raw_readl(ipc->cmu_chub_qch +
+					  REG_QCH_CON_CM4_SHUB_QCH);
+			val &= ~(IGNORE_FORCE_PM_EN | CLOCK_REQ | ENABLE);
+			__raw_writel((val | IGNORE_FORCE_PM_EN),
+				     ipc->cmu_chub_qch +
+				     REG_QCH_CON_CM4_SHUB_QCH);
+#endif
+			/* pmu reset-release on CHUB */
+			val =
+			    __raw_readl(ipc->pmu_chub_reset +
+					REG_CHUB_RESET_CHUB_OPTION);
+			__raw_writel((val | CHUB_RESET_RELEASE_VALUE),
+				     ipc->pmu_chub_reset +
+				     REG_CHUB_RESET_CHUB_OPTION);
+
+#if defined(CONFIG_SOC_EXYNOS9610)
+			/* check chub cpu status */
+			do {
+				val = __raw_readl(ipc->pmu_chub_reset +
+						  REG_CHUB_RESET_CHUB_CONFIGURATION);
+				msleep(WAIT_TIMEOUT_MS);
+				if (++trycnt > WAIT_TRY_CNT) {
+					dev_warn(ipc->dev, "chub cpu status is not set correctly\n");
+					break;
+				}
+			} while ((val & 0x1) == 0x0);
+
+			/* cmu cm4 clock - release */
+			val = __raw_readl(ipc->cmu_chub_qch +
+					  REG_QCH_CON_CM4_SHUB_QCH);
+			val &= ~(IGNORE_FORCE_PM_EN | CLOCK_REQ | ENABLE);
+			__raw_writel((val | IGNORE_FORCE_PM_EN | CLOCK_REQ),
+				     ipc->cmu_chub_qch +
+				     REG_QCH_CON_CM4_SHUB_QCH);
+
+			val = __raw_readl(ipc->cmu_chub_qch +
+					  REG_QCH_CON_CM4_SHUB_QCH);
+			val &= ~(IGNORE_FORCE_PM_EN | CLOCK_REQ | ENABLE);
+			__raw_writel((val | CLOCK_REQ),
+				     ipc->cmu_chub_qch +
+				     REG_QCH_CON_CM4_SHUB_QCH);
+#endif
+		} else {
+			dev_warn(ipc->dev,
+				 "fails to contexthub power on. Status is %d\n",
+				 atomic_read(&ipc->chub_status));
+		}
+		break;
+	case MAILBOX_EVT_SYSTEM_RESET:
+#if defined(CONFIG_SOC_EXYNOS9810)
+		val = __raw_readl(ipc->pmu_chub_cpu + REG_CHUB_CPU_OPTION);
+		__raw_writel(val | ENABLE_SYSRESETREQ,
+			     ipc->pmu_chub_cpu + REG_CHUB_CPU_OPTION);
+#elif defined(CONFIG_SOC_EXYNOS9610)
+		val = __raw_readl(ipc->pmu_chub_reset + REG_CHUB_CPU_OPTION);
+		__raw_writel(val | ENABLE_SYSRESETREQ,
+			     ipc->pmu_chub_reset + REG_CHUB_CPU_OPTION);
+#else
+		/* fix: dev_warn() requires the device pointer first */
+		dev_warn(ipc->dev, "%s: doesn't support reset\n", __func__);
+		return 0;
+#endif
+		/* request systemresetreq to chub */
+		ipc_hw_write_shared_reg(AP, ipc->os_load, SR_BOOT_MODE);
+		ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_RESET);
+		break;
+	case MAILBOX_EVT_CORE_RESET:
+		/* release the CHUB core reset bit */
+		val = __raw_readl(ipc->pmu_chub_reset +
+				  REG_CHUB_RESET_CHUB_CONFIGURATION);
+		__raw_writel(val | (1 << 0),
+			     ipc->pmu_chub_reset +
+			     REG_CHUB_RESET_CHUB_CONFIGURATION);
+		break;
+	default:
+		break;
+	}
+
+	return contexthub_wait_alive(ipc);
+}
+
+int contexthub_ipc_write_event(struct contexthub_ipc_info *ipc,
+ enum mailbox_event event)
+{
+ u32 val;
+ int ret = 0;
+
+ switch (event) {
+ case MAILBOX_EVT_INIT_IPC:
+ ret = contexthub_ipc_drv_init(ipc);
+ break;
+ case MAILBOX_EVT_ENABLE_IRQ:
+ /* if enable, mask from CHUB IRQ, else, unmask from CHUB IRQ */
+ ipc_hw_unmask_irq(AP, IRQ_EVT_C2A_INT);
+ ipc_hw_unmask_irq(AP, IRQ_EVT_C2A_INTCLR);
+ break;
+ case MAILBOX_EVT_DISABLE_IRQ:
+ ipc_hw_mask_irq(AP, IRQ_EVT_C2A_INT);
+ ipc_hw_mask_irq(AP, IRQ_EVT_C2A_INTCLR);
+ break;
+ case MAILBOX_EVT_ERASE_SHARED:
+ memset(ipc_get_base(IPC_REG_SHARED), 0, ipc_get_offset(IPC_REG_SHARED));
+ break;
+ case MAILBOX_EVT_DUMP_STATUS:
+ chub_dbg_dump_status(ipc);
+ break;
+ case MAILBOX_EVT_WAKEUP_CLR:
+ if (atomic_read(&ipc->wakeup_chub) == CHUB_ON) {
+ atomic_set(&ipc->wakeup_chub, CHUB_OFF);
+ ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_WAKEUP_CLR);
+ }
+ break;
+ case MAILBOX_EVT_WAKEUP:
+ if (atomic_read(&ipc->wakeup_chub) == CHUB_OFF) {
+ atomic_set(&ipc->wakeup_chub, CHUB_ON);
+ ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_WAKEUP);
+ }
+ break;
+ case MAILBOX_EVT_POWER_ON:
+ ret = contexthub_hw_reset(ipc, event);
+ log_schedule_flush_all();
+ break;
+ case MAILBOX_EVT_CORE_RESET:
+ case MAILBOX_EVT_SYSTEM_RESET:
+ if (atomic_read(&ipc->chub_status) == CHUB_ST_SHUTDOWN) {
+ ret = contexthub_hw_reset(ipc, event);
+ log_schedule_flush_all();
+ } else {
+ dev_err(ipc->dev,
+ "contexthub status isn't shutdown. fails to reset\n");
+ ret = -EINVAL;
+ }
+ break;
+ case MAILBOX_EVT_SHUTDOWN:
+ /* assert */
+ if (MAILBOX_EVT_RESET == MAILBOX_EVT_CORE_RESET) {
+ ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_SHUTDOWN);
+ msleep(100); /* wait for shut down time */
+#if defined(CONFIG_SOC_EXYNOS9810)
+ val = __raw_readl(ipc->pmu_chub_cpu +
+ REG_CHUB_CPU_STATUS);
+#elif defined(CONFIG_SOC_EXYNOS9610)
+ val = __raw_readl(ipc->pmu_chub_reset +
+ REG_CHUB_CPU_STATUS);
+#else
+ dev_err(ipc->dev,
+ "contexthub doesn't support shutdown\n");
+ return 0;
+#endif
+ if (val & (1 << REG_CHUB_CPU_STATUS_BIT_STANDBYWFI)) {
+ val = __raw_readl(ipc->pmu_chub_reset +
+ REG_CHUB_RESET_CHUB_CONFIGURATION);
+ __raw_writel(val & ~(1 << 0),
+ ipc->pmu_chub_reset +
+ REG_CHUB_RESET_CHUB_CONFIGURATION);
+ } else {
+ dev_err(ipc->dev,
+ "fails to shutdown contexthub. cpu_status: 0x%x\n",
+ val);
+ return -EINVAL;
+ }
+ }
+ atomic_set(&ipc->chub_status, CHUB_ST_SHUTDOWN);
+ break;
+ case MAILBOX_EVT_CHUB_ALIVE:
+ ipc->chub_alive_lock.flag = 0;
+ ipc_hw_gen_interrupt(AP, IRQ_EVT_CHUB_ALIVE);
+ val = wait_event_timeout(ipc->chub_alive_lock.event,
+ ipc->chub_alive_lock.flag,
+ msecs_to_jiffies(WAIT_TIMEOUT_MS));
+
+ if (ipc->chub_alive_lock.flag) {
+ atomic_set(&ipc->chub_status, CHUB_ST_RUN);
+ dev_info(ipc->dev, "chub is alive");
+ } else {
+ dev_err(ipc->dev,
+ "chub isn't alive. should be reset\n");
+ if (atomic_read(&ipc->chub_status) ==
+ CHUB_ST_RUN) {
+ chub_dump_and_reset(ipc,
+ CHUB_ERR_CHUB_NO_RESPONSE);
+ ipc->err_cnt[CHUB_ERR_CHUB_NO_RESPONSE]++;
+ atomic_set(&ipc->chub_status,
+ CHUB_ST_NO_RESPONSE);
+ }
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if ((int)event < IPC_DEBUG_UTC_MAX) {
+ ipc->utc_run = event;
+ if ((int)event == IPC_DEBUG_UTC_TIME_SYNC) {
+ check_rtc_time();
+#ifdef CONFIG_CONTEXTHUB_DEBUG
+ /* log_flush enable when utc_run is set */
+ schedule_work(&ipc->utc_work);
+#else
+ ipc_write_debug_event(AP, (u32)event);
+ ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_DEBUG);
+#endif
+ }
+ ipc_write_debug_event(AP, (u32)event);
+ ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_DEBUG);
+ }
+
+ return ret;
+}
+
+int contexthub_poweron(struct contexthub_ipc_info *ipc)
+{
+ int ret = 0;
+ struct device *dev = ipc->dev;
+
+ if (!atomic_read(&ipc->chub_status)) {
+ ret = contexthub_download_image(ipc, 1);
+ if (ret) {
+ dev_warn(dev, "fails to download bootloader\n");
+ return ret;
+ }
+
+ ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_INIT_IPC);
+ if (ret) {
+ dev_warn(dev, "fails to init ipc\n");
+ return ret;
+ }
+
+ ret = contexthub_download_image(ipc, 0);
+ if (ret) {
+ dev_warn(dev, "fails to download kernel\n");
+ return ret;
+ }
+ ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_POWER_ON);
+ if (ret) {
+ dev_warn(dev, "fails to poweron\n");
+ return ret;
+ }
+
+ if (atomic_read(&ipc->chub_status) == CHUB_ST_RUN)
+ dev_info(dev, "contexthub power-on");
+ else
+ dev_warn(dev, "contexthub fails to power-on");
+ } else {
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ dev_warn(dev, "fails to %s with %d. Status is %d\n",
+ __func__, ret, atomic_read(&ipc->chub_status));
+ return ret;
+}
+
+/*
+ * Shut the contexthub down and, if the shutdown succeeded, raise the
+ * reset event. TODO (from original author): add wait lock.
+ */
+int contexthub_reset(struct contexthub_ipc_info *ipc)
+{
+	int ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_SHUTDOWN);
+
+	if (!ret)
+		ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_RESET);
+
+	return ret;
+}
+
+/*
+ * Load a firmware image into contexthub memory. @bl selects the
+ * bootloader ("bl.unchecked.bin" into IPC_REG_BL) versus the OS image
+ * (ipc->os_name into IPC_REG_OS). Returns 0 or a request_firmware error.
+ *
+ * Fixes vs. original: in the OS path entry->size was logged *after*
+ * release_firmware(entry) (use-after-free); a stray "11" was embedded in
+ * the bootloader log format string.
+ */
+int contexthub_download_image(struct contexthub_ipc_info *ipc, int bl)
+{
+	const struct firmware *entry;
+	int ret;
+
+	if (bl) {
+		ret = request_firmware(&entry, "bl.unchecked.bin", ipc->dev);
+		if (ret) {
+			dev_err(ipc->dev, "%s, bl request_firmware failed\n",
+				__func__);
+			return ret;
+		}
+		memcpy(ipc_get_base(IPC_REG_BL), entry->data, entry->size);
+		dev_info(ipc->dev, "%s: bootloader(size:0x%x) on %lx\n",
+			 __func__, (int)entry->size,
+			 (unsigned long)ipc_get_base(IPC_REG_BL));
+
+		release_firmware(entry);
+	} else {
+		ret = request_firmware(&entry, ipc->os_name, ipc->dev);
+		if (ret) {
+			dev_err(ipc->dev, "%s, %s request_firmware failed\n",
+				__func__, ipc->os_name);
+			return ret;
+		}
+		memcpy(ipc_get_base(IPC_REG_OS), entry->data, entry->size);
+		/* log before releasing: entry is invalid after release_firmware() */
+		dev_info(ipc->dev, "%s: %s(size:0x%x) on %lx\n", __func__,
+			 ipc->os_name, (int)entry->size,
+			 (unsigned long)ipc_get_base(IPC_REG_OS));
+		release_firmware(entry);
+	}
+
+	return 0;
+}
+
+/*
+ * Full reflash sequence: shut the hub down, download the bootloader,
+ * then the OS image, then reset. Each step runs only if the previous
+ * one succeeded; the first error code is returned.
+ */
+int contexthub_download_bl(struct contexthub_ipc_info *ipc)
+{
+	int ret;
+
+	ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_SHUTDOWN);
+
+	if (!ret)
+		ret = contexthub_download_image(ipc, 1);
+
+	if (!ret)
+		ret = contexthub_download_image(ipc, 0);
+
+	if (!ret)
+		ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_RESET);
+
+	return ret;
+}
+
+/* Convenience wrapper: download only the OS (non-bootloader) image. */
+int contexthub_download_kernel(struct contexthub_ipc_info *ipc)
+{
+	return contexthub_download_image(ipc, 0);
+}
+
+#ifdef CONFIG_CONTEXTHUB_DEBUG
+/*
+ * Debug worker: while a UTC debug mode is active (ipc->utc_run), push
+ * the AP clock and the UTC event to the hub every 20 s, flushing the fw
+ * log every 10th iteration. Exits when utc_run is cleared.
+ */
+static void handle_utc_work_func(struct work_struct *work)
+{
+	struct contexthub_ipc_info *ipc =
+	    container_of(work, struct contexthub_ipc_info, utc_work);
+	int trycnt = 0;
+
+	while (ipc->utc_run) {
+		msleep(20000);
+		ipc_write_val(AP, sched_clock());
+		ipc_write_debug_event(AP, ipc->utc_run);
+		ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_DEBUG);
+		if (!(++trycnt % 10))
+			log_flush(ipc->fw_log);
+	};
+
+	dev_dbg(ipc->dev, "%s is done with %d try\n", __func__, trycnt);
+}
+#endif
+
+#define MAX_ERR_CNT (3)
+/* handle errors of chub driver and fw */
+/*
+ * Deferred handler for driver- and firmware-side errors.
+ * Flushes the fw log, performs a silent dump-and-reset when any error
+ * counter exceeds MAX_ERR_CNT, dumps hardware state on driver errors,
+ * and maps firmware debug events onto error counters.
+ */
+static void handle_debug_work_func(struct work_struct *work)
+{
+	struct contexthub_ipc_info *ipc =
+	    container_of(work, struct contexthub_ipc_info, debug_work);
+	enum ipc_debug_event event = ipc_read_debug_event(AP);
+	int i;
+	enum CHUB_ERR_TYPE fw_err = 0;
+
+	dev_info(ipc->dev,
+		 "%s is run with nanohub driver %d, fw %d error\n", __func__,
+		 ipc->chub_err, event);
+
+	log_flush(ipc->fw_log);
+
+	/* do silent reset */
+	for (i = 0; i < CHUB_ERR_MAX; i++) {
+		if (ipc->err_cnt[i] > MAX_ERR_CNT) {
+			pr_info("%s: reset chub due to irq trigger error: alive:%d\n",
+				__func__, contexthub_ipc_write_event(ipc, MAILBOX_EVT_CHUB_ALIVE));
+			chub_dump_and_reset(ipc, i);
+			return;
+		}
+	}
+
+	/* chub driver error */
+	if (ipc->chub_err) {
+		log_dump_all(ipc->chub_err);
+		chub_dbg_dump_hw(ipc, ipc->chub_err);
+		ipc->chub_err = 0;
+		return;
+	}
+
+	/* chub fw error: classify the reported debug event */
+	switch (event) {
+	case IPC_DEBUG_CHUB_FULL_LOG:
+		dev_warn(ipc->dev,
+			 "Contexthub notified that logbuf is full\n");
+		break;
+	case IPC_DEBUG_CHUB_PRINT_LOG:
+		break;
+	case IPC_DEBUG_CHUB_FAULT:
+		dev_warn(ipc->dev, "Contexthub notified fault\n");
+		fw_err = CHUB_ERR_NANOHUB_FAULT;
+		break;
+	case IPC_DEBUG_CHUB_ASSERT:
+		dev_warn(ipc->dev, "Contexthub notified assert\n");
+		fw_err = CHUB_ERR_NANOHUB_ASSERT;
+		break;
+	case IPC_DEBUG_CHUB_ERROR:
+		dev_warn(ipc->dev, "Contexthub notified error\n");
+		fw_err = CHUB_ERR_NANOHUB_ERROR;
+		break;
+	default:
+		break;
+	}
+
+	if (fw_err) {
+		ipc->err_cnt[fw_err]++;
+		contexthub_ipc_write_event(ipc, MAILBOX_EVT_DUMP_STATUS);
+		log_dump_all(fw_err);
+	}
+}
+
+static void handle_irq(struct contexthub_ipc_info *ipc, enum irq_evt_chub evt)
+{
+ struct ipc_content *content;
+
+ switch (evt) {
+ case IRQ_EVT_C2A_DEBUG:
+ schedule_work(&ipc->debug_work);
+ break;
+ case IRQ_EVT_C2A_INT:
+ if (atomic_read(&ipc->irq1_apInt) == C2A_OFF) {
+ atomic_set(&ipc->irq1_apInt, C2A_ON);
+ contexthub_notify_host(ipc);
+ }
+ break;
+ case IRQ_EVT_C2A_INTCLR:
+ atomic_set(&ipc->irq1_apInt, C2A_OFF);
+ break;
+ default:
+ if (evt < IRQ_EVT_CH_MAX) {
+ int lock;
+
+#ifdef USE_IPC_BUF
+ ipc->read_lock.flag++;
+#else
+ content = ipc_get_addr(IPC_REG_IPC_C2A, evt);
+ ipc_update_channel_status(content, CS_AP_RECV);
+
+ if (!ipc->read_lock.flag)
+ ipc->recv_order.order = 1; /* reset order */
+
+ if (ipc->recv_order.container[evt])
+ dev_warn(ipc->dev,
+ "%s: invalid order container[%d] = %lu, status:%x\n",
+ __func__, evt,
+ ipc->recv_order.container[evt],
+ content->status);
+
+ ipc->recv_order.container[evt] =
+ ++ipc->recv_order.order;
+ ipc->read_lock.flag |= (1 << evt);
+
+ DEBUG_PRINT(KERN_DEBUG, "<-R%d(%d)(%d)\n", evt,
+ content->size, ipc->recv_order.order);
+#endif
+ /* TODO: requered.. ? */
+ spin_lock(&ipc->read_lock.event.lock);
+ lock = read_is_locked(ipc);
+ spin_unlock(&ipc->read_lock.event.lock);
+ if (lock)
+ wake_up_interruptible_sync(&ipc->read_lock.event);
+ } else {
+ dev_warn(ipc->dev, "%s: invalid %d event",
+ __func__, evt);
+ }
+ break;
+ };
+}
+
+static irqreturn_t contexthub_irq_handler(int irq, void *data)
+{
+ struct contexthub_ipc_info *ipc = data;
+ int start_index = ipc_hw_read_int_start_index(AP);
+ unsigned int status = ipc_hw_read_int_status_reg(AP);
+ struct ipc_evt_buf *cur_evt;
+ enum CHUB_ERR_TYPE err = 0;
+ enum irq_chub evt = 0;
+ int irq_num = IRQ_EVT_CHUB_ALIVE + start_index;
+
+ /* chub alive interrupt handle */
+ if (status & (1 << irq_num)) {
+ status &= ~(1 << irq_num);
+ ipc_hw_clear_int_pend_reg(AP, irq_num);
+ /* set wakeup flag for chub_alive_lock */
+ ipc->chub_alive_lock.flag = 1;
+ wake_up(&ipc->chub_alive_lock.event);
+ }
+
+ /* chub ipc interrupt handle */
+ while (status) {
+ cur_evt = ipc_get_evt(IPC_EVT_C2A);
+
+ if (cur_evt) {
+ evt = cur_evt->evt;
+ irq_num = cur_evt->irq + start_index;
+
+ /* check match evtq and hw interrupt pending */
+ if (!(status & (1 << irq_num))) {
+ err = CHUB_ERR_EVTQ_NO_HW_TRIGGER;
+ break;
+ }
+ } else {
+ err = CHUB_ERR_EVTQ_EMTPY;
+ break;
+ }
+
+ handle_irq(ipc, (u32)evt);
+ ipc_hw_clear_int_pend_reg(AP, irq_num);
+ status &= ~(1 << irq_num);
+ }
+
+ if (err) {
+ ipc->chub_err = err;
+ pr_err("inval irq err(%d):start_irqnum:%d,evt(%p):%d,irq_hw:%d,status_reg:0x%x(0x%x,0x%x)\n",
+ ipc->chub_err, start_index, cur_evt, evt, irq_num,
+ status, ipc_hw_read_int_status_reg(AP),
+ ipc_hw_read_int_gen_reg(AP));
+ ipc->err_cnt[err]++;
+ ipc_hw_clear_all_int_pend_reg(AP);
+ schedule_work(&ipc->debug_work);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Watchdog IRQ: only logs the contexthub WDT expiry (no recovery here). */
+static irqreturn_t contexthub_irq_wdt_handler(int irq, void *data)
+{
+	struct contexthub_ipc_info *ipc = data;
+
+	dev_info(ipc->dev, "context generated WDT timeout.\n");
+
+	return IRQ_HANDLED;
+}
+
+static __init void contexhub_config_init(struct contexthub_ipc_info *chub)
+{
+ /* BAAW-P-APM-CHUB for CHUB to access APM_CMGP. 1 window is used */
+ if (chub->chub_baaw) {
+ IPC_HW_WRITE_BAAW_CHUB0(chub->chub_baaw,
+ chub->baaw_info.baaw_p_apm_chub_start);
+ IPC_HW_WRITE_BAAW_CHUB1(chub->chub_baaw,
+ chub->baaw_info.baaw_p_apm_chub_end);
+ IPC_HW_WRITE_BAAW_CHUB2(chub->chub_baaw,
+ chub->baaw_info.baaw_p_apm_chub_remap);
+ IPC_HW_WRITE_BAAW_CHUB3(chub->chub_baaw, BAAW_RW_ACCESS_ENABLE);
+ }
+
+ /* enable mailbox ipc */
+ ipc_set_base(chub->sram);
+ ipc_set_owner(AP, chub->mailbox, IPC_SRC);
+}
+
+static int contexthub_get_cmgp_clocks(struct device *dev)
+{
+#if defined(CONFIG_SOC_EXYNOS9610)
+ struct clk *clk;
+ int ret = 0;
+
+ /* RPR0521, LIS3MDL */
+ clk = devm_clk_get(dev, "cmgp_usi01");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "[nanohub] cannot get cmgp_usi01\n");
+ return -ENOENT;
+ }
+ ret = clk_prepare(clk);
+ if (ret) {
+ dev_err(dev, "[nanohub] cannot prepare cmgp_usi01\n");
+ return ret;
+ }
+ ret = clk_enable(clk);
+ if (ret) {
+ dev_err(dev, "[nanohub] cannot enable cmgp_usi01\n");
+ return ret;
+ }
+ dev_info(dev, "cmgp_usi01(%lu) is enabled\n", clk_get_rate(clk));
+
+ /* BMP280 */
+ clk = devm_clk_get(dev, "cmgp_usi03");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "[nanohub] cannot get cmgp_usi03\n");
+ return -ENOENT;
+ }
+ ret = clk_prepare(clk);
+ if (ret) {
+ dev_err(dev, "[nanohub] cannot prepare cmgp_usi03\n");
+ return ret;
+ }
+ ret = clk_enable(clk);
+ if (ret) {
+ dev_err(dev, "[nanohub] cannot enable cmgp_usi03\n");
+ return ret;
+ }
+ dev_info(dev, "cmgp_usi03(%lu) is enabled\n", clk_get_rate(clk));
+
+ clk = devm_clk_get(dev, "cmgp_i2c");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "[nanohub] cannot get cmgp_i2c\n");
+ return -ENOENT;
+ }
+ ret = clk_prepare(clk);
+ if (ret) {
+ dev_err(dev, "[nanohub] cannot prepare cmgp_i2c\n");
+ return ret;
+ }
+ ret = clk_enable(clk);
+ if (ret) {
+ dev_err(dev, "[nanohub] cannot enable cmgp_i2c\n");
+ return ret;
+ }
+ dev_info(dev, "cmgp_i2c(%lu) is enabled\n", clk_get_rate(clk));
+#endif
+
+ return 0;
+}
+
+#if defined(CONFIG_SOC_EXYNOS9610)
+//extern int cal_dll_apm_enable(void);
+#endif
+
+/*
+ * Probe-time hardware setup from the device tree: OS image name, mailbox
+ * and optional WDT interrupts, SFR/SRAM/PMU/BAAW/QCH register regions,
+ * BAAW window addresses, and the CHUB bus plus CMGP clocks.
+ *
+ * Fix vs. original: irq_of_parse_and_map() returns 0 (not a negative
+ * errno) on failure, so the old "if (irq < 0)" check could never fire
+ * and probe continued with an invalid IRQ.
+ */
+static __init int contexthub_ipc_hw_init(struct platform_device *pdev,
+					 struct contexthub_ipc_info *chub)
+{
+	int ret;
+	int irq;
+	struct resource *res;
+	const char *os;
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct clk *clk;
+
+	if (!node) {
+		dev_err(dev, "driver doesn't support non-dt\n");
+		return -ENODEV;
+	}
+
+	/* get os type from dt */
+	os = of_get_property(node, "os-type", NULL);
+	if (!os || !strcmp(os, "none") || !strcmp(os, "pass")) {
+		dev_err(dev, "no use contexthub\n");
+		chub->os_load = 0;
+		return -ENODEV;
+	} else {
+		chub->os_load = 1;
+		/* NOTE(review): assumes the DT string fits chub->os_name;
+		 * consider strlcpy(chub->os_name, os, sizeof(chub->os_name)) */
+		strcpy(chub->os_name, os);
+	}
+
+	/* get mailbox interrupt: irq_of_parse_and_map() returns 0 on error */
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq <= 0) {
+		dev_err(dev, "failed to get irq:%d\n", irq);
+		return -EINVAL;
+	}
+
+	/* request irq handler */
+	ret = devm_request_irq(dev, irq, contexthub_irq_handler,
+			       0, dev_name(dev), chub);
+	if (ret) {
+		dev_err(dev, "failed to request irq:%d, ret:%d\n", irq, ret);
+		return ret;
+	}
+
+	/* get wdt interrupt optionally */
+	irq = irq_of_parse_and_map(node, 1);
+	if (irq > 0) {
+		/* request irq handler */
+		ret = devm_request_irq(dev, irq,
+				       contexthub_irq_wdt_handler, 0,
+				       dev_name(dev), chub);
+		if (ret) {
+			dev_err(dev, "failed to request wdt irq:%d, ret:%d\n",
+				irq, ret);
+			return ret;
+		}
+	} else {
+		dev_info(dev, "don't use wdt irq:%d\n", irq);
+	}
+
+	/* get MAILBOX SFR */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mailbox");
+	chub->mailbox = devm_ioremap_resource(dev, res);
+	if (IS_ERR(chub->mailbox)) {
+		dev_err(dev, "fails to get mailbox sfr\n");
+		return PTR_ERR(chub->mailbox);
+	}
+
+	/* get SRAM base */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+	chub->sram = devm_ioremap_resource(dev, res);
+	if (IS_ERR(chub->sram)) {
+		dev_err(dev, "fails to get sram\n");
+		return PTR_ERR(chub->sram);
+	}
+
+	/* get chub gpr base */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dumpgpr");
+	chub->chub_dumpgrp = devm_ioremap_resource(dev, res);
+	if (IS_ERR(chub->chub_dumpgrp)) {
+		dev_err(dev, "fails to get dumpgrp\n");
+		return PTR_ERR(chub->chub_dumpgrp);
+	}
+
+	/* get pmu reset base */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "chub_reset");
+	chub->pmu_chub_reset = devm_ioremap_resource(dev, res);
+	if (IS_ERR(chub->pmu_chub_reset)) {
+		dev_err(dev, "fails to get chub_reset\n");
+		return PTR_ERR(chub->pmu_chub_reset);
+	}
+
+#if defined(CONFIG_SOC_EXYNOS9810)
+	/* get pmu reset enable base */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmu_chub_cpu");
+	chub->pmu_chub_cpu = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(chub->pmu_chub_cpu)) {
+		dev_err(dev, "fails to get pmu_chub_cpu\n");
+		return PTR_ERR(chub->pmu_chub_cpu);
+	}
+#endif
+
+	/* get chub baaw base; may legitimately be absent (set by VTS side) */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "chub_baaw");
+	chub->chub_baaw = devm_ioremap_resource(dev, res);
+	if (IS_ERR(chub->chub_baaw)) {
+		pr_err("driver failed to get chub_baaw\n");
+		chub->chub_baaw = 0;	/* it can be set on other-side (vts) */
+	}
+
+#if defined(CONFIG_SOC_EXYNOS9610)
+	/* get cmu qch base */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cmu_chub_qch");
+	chub->cmu_chub_qch = devm_ioremap_resource(dev, res);
+	if (IS_ERR(chub->cmu_chub_qch)) {
+		pr_err("driver failed to get cmu_chub_qch\n");
+		return PTR_ERR(chub->cmu_chub_qch);
+	}
+#endif
+
+	/* get addresses information to set BAAW */
+	if (of_property_read_u32_index
+	    (node, "baaw,baaw-p-apm-chub", 0,
+	     &chub->baaw_info.baaw_p_apm_chub_start)) {
+		dev_err(&pdev->dev,
+			"driver failed to get baaw-p-apm-chub, start\n");
+		return -ENODEV;
+	}
+
+	if (of_property_read_u32_index
+	    (node, "baaw,baaw-p-apm-chub", 1,
+	     &chub->baaw_info.baaw_p_apm_chub_end)) {
+		dev_err(&pdev->dev,
+			"driver failed to get baaw-p-apm-chub, end\n");
+		return -ENODEV;
+	}
+
+	if (of_property_read_u32_index
+	    (node, "baaw,baaw-p-apm-chub", 2,
+	     &chub->baaw_info.baaw_p_apm_chub_remap)) {
+		dev_err(&pdev->dev,
+			"driver failed to get baaw-p-apm-chub, remap\n");
+		return -ENODEV;
+	}
+
+#if defined(CONFIG_SOC_EXYNOS9610)
+	//cal_dll_apm_enable();
+#endif
+	clk = devm_clk_get(dev, "chub_bus");
+	if (IS_ERR(clk)) {
+		dev_err(dev, "[nanohub] cannot get clock\n");
+		return -ENOENT;
+	}
+#if defined(CONFIG_SOC_EXYNOS9610)
+	ret = clk_prepare(clk);
+	if (ret) {
+		dev_err(dev, "[nanohub] cannot prepare clock\n");
+		return ret;
+	}
+
+	ret = clk_enable(clk);
+	if (ret) {
+		dev_err(dev, "[nanohub] cannot enable clock\n");
+		return ret;
+	}
+#endif
+	chub->clkrate = clk_get_rate(clk);
+
+	ret = contexthub_get_cmgp_clocks(&pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "[nanohub] contexthub_get_cmgp_clocks failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* sysfs 'poweron' store: power up the contexthub core */
+static ssize_t chub_poweron(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	int err;
+
+	err = contexthub_poweron(dev_get_drvdata(dev));
+	if (err < 0)
+		return err;
+
+	return count;
+}
+
+/* sysfs 'reset' store: re-download the OS image, then reset the chub */
+static ssize_t chub_reset(struct device *dev,
+			  struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
+	int err;
+
+	err = contexthub_download_image(ipc, 0);
+	if (!err)
+		err = contexthub_reset(ipc);
+
+	if (err < 0)
+		return err;
+
+	return count;
+}
+
+/* write-only sysfs attributes registered in contexthub_ipc_probe() */
+static struct device_attribute attributes[] = {
+	__ATTR(poweron, 0220, NULL, chub_poweron),
+	__ATTR(reset, 0220, NULL, chub_reset),
+};
+
+/*
+ * Platform probe: obtain driver state memory, map HW resources via
+ * contexthub_ipc_hw_init(), optionally attach the CHRE HAL (nanohub)
+ * front-end, and create the sysfs control files.
+ */
+static int contexthub_ipc_probe(struct platform_device *pdev)
+{
+	struct contexthub_ipc_info *chub;
+	int need_to_free = 0;
+	int ret = 0;
+	int i;
+#ifdef CONFIG_CHRE_SENSORHUB_HAL
+	struct iio_dev *iio_dev;
+#endif
+	/* prefer the ramdump-persistent area; fall back to devm memory */
+	chub = chub_dbg_get_memory(DBG_NANOHUB_DD_AREA);
+	if (!chub) {
+		chub =
+		    devm_kzalloc(&pdev->dev, sizeof(struct contexthub_ipc_info),
+				 GFP_KERNEL);
+		need_to_free = 1;
+	}
+	/* devm_kzalloc() returns NULL on failure, never an ERR_PTR, so a
+	 * bare IS_ERR() check would let a failed allocation through */
+	if (!chub || IS_ERR(chub)) {
+		dev_err(&pdev->dev, "%s failed to get ipc memory\n", __func__);
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* parse dt and hw init */
+	ret = contexthub_ipc_hw_init(pdev, chub);
+	if (ret) {
+		dev_err(&pdev->dev, "%s failed to get init hw with ret %d\n",
+			__func__, ret);
+		goto err;
+	}
+
+#ifdef CONFIG_CHRE_SENSORHUB_HAL
+	/* nanohub probe */
+	iio_dev = nanohub_probe(&pdev->dev, NULL);
+	if (IS_ERR(iio_dev)) {
+		/* propagate the real error; ret was still 0 here before */
+		ret = PTR_ERR(iio_dev);
+		goto err;
+	}
+
+	/* set wakeup irq number on nanohub driver */
+	chub->data = iio_priv(iio_dev);
+	nanohub_mailbox_comms_init(&chub->data->comms);
+	chub->pdata = chub->data->pdata;
+	chub->pdata->mailbox_client = chub;
+	chub->data->irq1 = IRQ_EVT_A2C_WAKEUP;
+	chub->data->irq2 = 0;
+#endif
+
+	atomic_set(&chub->chub_status, CHUB_ST_NO_POWER);
+	chub->chub_err = 0;
+	chub->powermode = INIT_CHUB_VAL;
+	chub->dev = &pdev->dev;
+	platform_set_drvdata(pdev, chub);
+	contexhub_config_init(chub);
+
+	/* sysfs files are best-effort: warn but do not fail the probe */
+	for (i = 0, ret = 0; i < ARRAY_SIZE(attributes); i++) {
+		ret = device_create_file(chub->dev, &attributes[i]);
+		if (ret)
+			dev_warn(chub->dev, "Failed to create file: %s\n",
+				 attributes[i].attr.name);
+	}
+
+	init_waitqueue_head(&chub->read_lock.event);
+	init_waitqueue_head(&chub->chub_alive_lock.event);
+	INIT_WORK(&chub->debug_work, handle_debug_work_func);
+#ifdef CONFIG_CONTEXTHUB_DEBUG
+	INIT_WORK(&chub->utc_work, handle_utc_work_func);
+#endif
+
+	dev_info(chub->dev, "%s with %s FW and %lu clk is done\n",
+		 __func__, chub->os_name, chub->clkrate);
+	return 0;
+err:
+	if (chub && need_to_free)
+		devm_kfree(&pdev->dev, chub);
+
+	dev_err(&pdev->dev, "%s is fail with ret %d\n", __func__, ret);
+	return ret;
+}
+
+/* remove: nothing to tear down explicitly; mappings are devm-managed */
+static int contexthub_ipc_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+/* devicetree binding for this driver */
+static const struct of_device_id contexthub_ipc_match[] = {
+	{.compatible = "samsung,exynos-nanohub"},
+	{},
+};
+
+/* platform driver descriptor; bound via the DT table above */
+static struct platform_driver samsung_contexthub_ipc_driver = {
+	.probe = contexthub_ipc_probe,
+	.remove = contexthub_ipc_remove,
+	.driver = {
+		.name = "nanohub-ipc",
+		.owner = THIS_MODULE,
+		.of_match_table = contexthub_ipc_match,
+	},
+};
+
+/*
+ * Module entry: register the platform driver.
+ * NOTE(review): intentionally non-static/non-__init, presumably because
+ * another translation unit calls it directly -- confirm; otherwise it
+ * should be 'static int __init'.
+ */
+int nanohub_mailbox_init(void)
+{
+	return platform_driver_register(&samsung_contexthub_ipc_driver);
+}
+
+/* module exit: unregister the platform driver */
+static void __exit nanohub_mailbox_cleanup(void)
+{
+	platform_driver_unregister(&samsung_contexthub_ipc_driver);
+}
+
+/* module entry/exit hookup and metadata */
+module_init(nanohub_mailbox_init);
+module_exit(nanohub_mailbox_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Exynos contexthub mailbox Driver");
+MODULE_AUTHOR("Boojin Kim <boojin.kim@samsung.com>");
--- /dev/null
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Boojin Kim <boojin.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __CONTEXTHUB_IPC_H_
+#define __CONTEXTHUB_IPC_H_
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/wakelock.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/platform_data/nanohub.h>
+#include <linux/sched/clock.h>
+#include <linux/sched/signal.h>
+#include "chub_ipc.h"
+#include "chub_log.h"
+
+/* utils for nanohub main */
+/*
+ * Variant of wait_event_interruptible_timeout() for callers already
+ * holding q.lock: the lock is dropped only around schedule_timeout()
+ * and re-taken before re-checking 'cond'.  Returns >0 when the
+ * condition became true, 0 on timeout without the condition, and
+ * -ERESTARTSYS if a signal arrived (in which case concurrent waiters
+ * are re-woken to compensate for the aborted exclusive wait).
+ */
+#define wait_event_interruptible_timeout_locked(q, cond, tmo) \
+({ \
+	long __ret = (tmo); \
+	DEFINE_WAIT(__wait); \
+	if (!(cond)) { \
+		for (;;) { \
+			__wait.flags &= ~WQ_FLAG_EXCLUSIVE; \
+			if (list_empty(&__wait.entry)) \
+				__add_wait_queue_entry_tail(&(q), &__wait); \
+			set_current_state(TASK_INTERRUPTIBLE); \
+			if ((cond)) \
+				break; \
+			if (signal_pending(current)) { \
+				__ret = -ERESTARTSYS; \
+				break; \
+			} \
+			spin_unlock_irq(&(q).lock); \
+			__ret = schedule_timeout(__ret); \
+			spin_lock_irq(&(q).lock); \
+			if (!__ret) { \
+				if ((cond)) \
+					__ret = 1; \
+				break; \
+			} \
+		} \
+		__set_current_state(TASK_RUNNING); \
+		if (!list_empty(&__wait.entry)) \
+			list_del_init(&__wait.entry); \
+		else if (__ret == -ERESTARTSYS && \
+			 /*reimplementation of wait_abort_exclusive() */\
+			 waitqueue_active(&(q))) \
+			__wake_up_locked_key(&(q), TASK_INTERRUPTIBLE, \
+					     NULL); \
+	} else { \
+		__ret = 1; \
+	} \
+	__ret; \
+})
+
+/*
+ * Events passed to contexthub_ipc_write_event().  Values below
+ * MAILBOX_EVT_DEBUG_MAX overlap the firmware's IPC debug UTC codes;
+ * note MAILBOX_EVT_RESET aliases MAILBOX_EVT_CORE_RESET.
+ */
+enum mailbox_event {
+	MAILBOX_EVT_UTC_MAX = IPC_DEBUG_UTC_MAX,
+	MAILBOX_EVT_DUMP_STATUS = IPC_DEBUG_DUMP_STATUS,
+	MAILBOX_EVT_DUMP_CHUB,
+	MAILBOX_EVT_POWER_ON,
+	MAILBOX_EVT_DEBUG_MAX,
+	MAILBOX_EVT_WAKEUP,
+	MAILBOX_EVT_WAKEUP_CLR,
+	MAILBOX_EVT_ERASE_SHARED,
+	MAILBOX_EVT_ENABLE_IRQ,
+	MAILBOX_EVT_DISABLE_IRQ,
+	MAILBOX_EVT_SHUTDOWN,
+	MAILBOX_EVT_INIT_IPC,
+	MAILBOX_EVT_CHUB_ALIVE,
+	MAILBOX_EVT_CORE_RESET,
+	MAILBOX_EVT_SYSTEM_RESET,
+	MAILBOX_EVT_RESET = MAILBOX_EVT_CORE_RESET,
+	MAILBOX_EVT_MAX,
+};
+
+/* lifecycle states tracked in contexthub_ipc_info.chub_status */
+enum chub_status {
+	CHUB_ST_NO_POWER,
+	CHUB_ST_POWER_ON,
+	CHUB_ST_RUN,
+	CHUB_ST_SHUTDOWN,
+	CHUB_ST_NO_RESPONSE,
+};
+
+/* reader wait state; 'flag' is written from irq context (hence volatile) */
+struct read_wait {
+	atomic_t cnt;
+	volatile u32 flag;
+	wait_queue_head_t event;
+};
+
+/* per-channel receive ordering bookkeeping (used when !USE_IPC_BUF) */
+struct recv_ctrl {
+	unsigned long order;
+	volatile unsigned long container[IRQ_EVT_CH_MAX];
+};
+
+/* waitqueue used to block until the chub answers an alive check */
+struct chub_alive {
+	unsigned int flag;
+	wait_queue_head_t event;
+};
+
+/*
+ * Error classes counted in contexthub_ipc_info.err_cnt[].
+ * NOTE(review): CHUB_ERR_EVTQ_EMTPY is a typo for "EMPTY", kept as-is
+ * because the identifier may be referenced from other files.
+ */
+enum CHUB_ERR_TYPE {
+	CHUB_ERR_EVTQ_EMTPY,	/* ap error */
+	CHUB_ERR_READ_FAIL,
+	CHUB_ERR_WRITE_FAIL,
+	CHUB_ERR_EVTQ_NO_HW_TRIGGER,
+	CHUB_ERR_CHUB_NO_RESPONSE,
+	CHUB_ERR_NANOHUB_FAULT,	/* chub error */
+	CHUB_ERR_NANOHUB_ASSERT,
+	CHUB_ERR_NANOHUB_ERROR,
+	CHUB_ERR_MAX,
+};
+
+/* BAAW window (start/end/remap) read from the DT "baaw,baaw-p-apm-chub" */
+struct contexthub_baaw_info {
+	unsigned int baaw_p_apm_chub_start;
+	unsigned int baaw_p_apm_chub_end;
+	unsigned int baaw_p_apm_chub_remap;
+};
+
+/*
+ * Per-device driver state.  The __iomem fields are the SFR/SRAM
+ * mappings set up in contexthub_ipc_hw_init(); the rest is runtime
+ * bookkeeping shared with the debug (chub_dbg) and log (chub_log) code.
+ */
+struct contexthub_ipc_info {
+	struct device *dev;
+	struct nanohub_data *data;	/* CHRE HAL (nanohub) side state */
+	struct nanohub_platform_data *pdata;
+	wait_queue_head_t wakeup_wait;
+	struct work_struct debug_work;
+	struct read_wait read_lock;
+#ifdef USE_IPC_BUF
+	u8 rxbuf[PACKET_SIZE_MAX];
+#else
+	struct recv_ctrl recv_order;
+#endif
+	struct chub_alive chub_alive_lock;
+	void __iomem *sram;
+	void __iomem *mailbox;
+	void __iomem *chub_dumpgrp;
+	void __iomem *chub_baaw;	/* may be NULL: set up by VTS side */
+	void __iomem *pmu_chub_reset;
+	void __iomem *pmu_chub_cpu;
+	void __iomem *cmu_chub_qch;
+	struct contexthub_baaw_info baaw_info;
+	struct ipc_map_area *ipc_map;
+	struct log_buffer_info *fw_log;
+	struct log_buffer_info *dd_log;
+	struct LOG_BUFFER *dd_log_buffer;
+	unsigned long clkrate;
+	enum CHUB_ERR_TYPE chub_err;
+	atomic_t chub_status;		/* enum chub_status */
+	atomic_t irq1_apInt;
+	atomic_t wakeup_chub;
+	int err_cnt[CHUB_ERR_MAX];
+	int utc_run;
+	int powermode;			/* INIT_CHUB_VAL until configured */
+	bool os_load;
+	char os_name[MAX_FILE_LEN];
+#ifdef CONFIG_CONTEXTHUB_DEBUG
+	struct work_struct utc_work;
+#endif
+};
+
+/* PMU CHUB_CPU registers */
+#if defined(CONFIG_SOC_EXYNOS9810)
+#define REG_CHUB_CPU_STATUS (0x0)
+#elif defined(CONFIG_SOC_EXYNOS9610)
+#define REG_CHUB_CPU_STATUS (0x4)
+#else
+/* TODO: Need to check */
+#define REG_CHUB_CPU_STATUS (0x0)
+#endif
+#define REG_CHUB_CPU_STATUS_BIT_STANDBYWFI (28)
+#if defined(CONFIG_SOC_EXYNOS9810)
+#define REG_CHUB_CPU_OPTION (0x4)
+#define ENABLE_SYSRESETREQ BIT(4)
+#elif defined(CONFIG_SOC_EXYNOS9610)
+#define REG_CHUB_CPU_OPTION (0x8)
+#define ENABLE_SYSRESETREQ BIT(9)
+#else
+/* TODO: Need to check */
+#define REG_CHUB_CPU_OPTION (0x0)
+#define ENABLE_SYSRESETREQ BIT(0)
+#endif
+#define REG_CHUB_CPU_DURATION (0x8)
+
+/* PMU CHUB_RESET registers */
+#define REG_CHUB_RESET_CHUB_CONFIGURATION (0x0)
+#define REG_CHUB_RESET_CHUB_STATUS (0x4)
+#define REG_CHUB_RESET_CHUB_OPTION (0x8)
+#if defined(CONFIG_SOC_EXYNOS9810)
+#define CHUB_RESET_RELEASE_VALUE (0x10000000)
+#elif defined(CONFIG_SOC_EXYNOS9610)
+#define CHUB_RESET_RELEASE_VALUE (0x8000)
+#else
+/* TODO: Need to check */
+#define CHUB_RESET_RELEASE_VALUE (0x0)
+#endif
+
+/* CMU CHUB_QCH registers */
+#if defined(CONFIG_SOC_EXYNOS9610)
+#define REG_QCH_CON_CM4_SHUB_QCH (0x8)
+#define IGNORE_FORCE_PM_EN BIT(2)
+#define CLOCK_REQ BIT(1)
+#define ENABLE BIT(0)
+#endif
+
+/* CHUB dump GRP Registers : CHUB BASE + 0x1f000000 */
+#define REG_CHUB_DUMPGPR_CTRL (0x0)
+#define REG_CHUB_DUMPGPR_PCR (0x4)
+#define REG_CHUB_DUMPGPR_GP0R (0x10)
+#define REG_CHUB_DUMPGPR_GP1R (0x14)
+#define REG_CHUB_DUMPGPR_GP2R (0x18)
+#define REG_CHUB_DUMPGPR_GP3R (0x1c)
+#define REG_CHUB_DUMPGPR_GP4R (0x20)
+#define REG_CHUB_DUMPGPR_GP5R (0x24)
+#define REG_CHUB_DUMPGPR_GP6R (0x28)
+#define REG_CHUB_DUMPGPR_GP7R (0x2c)
+#define REG_CHUB_DUMPGPR_GP8R (0x30)
+#define REG_CHUB_DUMPGPR_GP9R (0x34)
+#define REG_CHUB_DUMPGPR_GPAR (0x38)
+#define REG_CHUB_DUMPGPR_GPBR (0x3c)
+#define REG_CHUB_DUMPGPR_GPCR (0x40)
+#define REG_CHUB_DUMPGPR_GPDR (0x44)
+#define REG_CHUB_DUMPGPR_GPER (0x48)
+#define REG_CHUB_DUMPGPR_GPFR (0x4c)
+
+#define IPC_HW_WRITE_DUMPGPR_CTRL(base, val) \
+ __raw_writel((val), (base) + REG_CHUB_DUMPGPR_CTRL)
+#define IPC_HW_READ_DUMPGPR_PCR(base) \
+ __raw_readl((base) + REG_CHUB_DUMPGPR_PCR)
+
+/* CHUB BAAW Registers : CHUB BASE + 0x100000 */
+#define REG_BAAW_D_CHUB0 (0x0)
+#define REG_BAAW_D_CHUB1 (0x4)
+#define REG_BAAW_D_CHUB2 (0x8)
+#define REG_BAAW_D_CHUB3 (0xc)
+#define BAAW_VAL_MAX (4)
+#define BAAW_RW_ACCESS_ENABLE 0x80000003
+
+#define IPC_MAX_TIMEOUT (0xffffff)
+#define INIT_CHUB_VAL (-1)
+
+#define IPC_HW_WRITE_BAAW_CHUB0(base, val) \
+ __raw_writel((val), (base) + REG_BAAW_D_CHUB0)
+#define IPC_HW_WRITE_BAAW_CHUB1(base, val) \
+ __raw_writel((val), (base) + REG_BAAW_D_CHUB1)
+#define IPC_HW_WRITE_BAAW_CHUB2(base, val) \
+ __raw_writel((val), (base) + REG_BAAW_D_CHUB2)
+#define IPC_HW_WRITE_BAAW_CHUB3(base, val) \
+ __raw_writel((val), (base) + REG_BAAW_D_CHUB3)
+
+int contexthub_ipc_write_event(struct contexthub_ipc_info *data,
+ enum mailbox_event event);
+int contexthub_ipc_read(struct contexthub_ipc_info *ipc,
+ uint8_t *rx, int max_length, int timeout);
+int contexthub_ipc_write(struct contexthub_ipc_info *ipc,
+ uint8_t *tx, int length, int timeout);
+
+int contexthub_poweron(struct contexthub_ipc_info *data);
+int contexthub_download_image(struct contexthub_ipc_info *data, int bl);
+int contexthub_download_kernel(struct contexthub_ipc_info *dev);
+int contexthub_download_bl(struct contexthub_ipc_info *data);
+int contexthub_reset(struct contexthub_ipc_info *data);
+int contexthub_wakeup(struct contexthub_ipc_info *data, int evt);
+
+int contexthub_is_run(struct contexthub_ipc_info *ipc);
+int contexthub_request(struct contexthub_ipc_info *ipc);
+void contexthub_release(struct contexthub_ipc_info *ipc);
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/iommu.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/uaccess.h>
+#include "chub_dbg.h"
+#include "chub_ipc.h"
+#include "chub.h"
+#ifdef CONFIG_CHRE_SENSORHUB_HAL
+#include "main.h"
+#endif
+
+#define NUM_OF_GPR (17)
+#define GPR_PC_INDEX (16)
+#define AREA_NAME_MAX (8)
+/* it's align ramdump side to prevent override */
+#define SRAM_ALIGN (1024)
+
+/* named sub-area descriptor inside the dump buffer (name, offset, size) */
+struct map_info {
+	char name[AREA_NAME_MAX];
+	u32 offset;
+	u32 size;
+};
+
+/*
+ * Layout of the reserved-mem dump buffer.  'sram' is a flexible array
+ * holding the chub SRAM image; the image actually starts at offset
+ * 'sram_start' so the SRAM area lands on SRAM_ALIGN for ramdump tools.
+ */
+struct dbg_dump {
+	struct map_info info[DBG_AREA_MAX];
+	long long time;
+	int reason;
+	struct contexthub_ipc_info chub;
+	struct ipc_area ipc_addr[IPC_REG_MAX];
+	u32 gpr[NUM_OF_GPR];
+	int sram_start;
+	char sram[];
+};
+
+static struct dbg_dump *p_dbg_dump;
+static struct reserved_mem *chub_rmem;
+
+/*
+ * Snapshot the chub CPU general-purpose registers (R0..R15 plus PC)
+ * from the DUMPGPR SFR block into the reserved-mem dump buffer.
+ * Takes a contexthub request for the duration of the register reads.
+ */
+void chub_dbg_dump_gpr(struct contexthub_ipc_info *ipc)
+{
+	int ret = contexthub_request(ipc);
+
+	if (ret) {
+		pr_err("%s: fails to contexthub_request\n", __func__);
+		return;
+	}
+
+	if (p_dbg_dump) {
+		int i;
+		struct dbg_dump *p_dump = p_dbg_dump;
+
+		/* latch the registers before reading them back */
+		IPC_HW_WRITE_DUMPGPR_CTRL(ipc->chub_dumpgrp, 0x1);
+		/* dump GPR */
+		for (i = 0; i <= GPR_PC_INDEX - 1; i++)
+			p_dump->gpr[i] =
+			    readl(ipc->chub_dumpgrp + REG_CHUB_DUMPGPR_GP0R +
+				  i * 4);
+		p_dump->gpr[GPR_PC_INDEX] =
+		    readl(ipc->chub_dumpgrp + REG_CHUB_DUMPGPR_PCR);
+	}
+
+	contexthub_release(ipc);
+}
+
+/* total bytes needed in reserved mem: header struct + full chub SRAM image */
+static u32 get_dbg_dump_size(void)
+{
+	return sizeof(struct dbg_dump) + ipc_get_chub_mem_size();
+}
+
+#ifdef CONFIG_CONTEXTHUB_DEBUG
+/*
+ * Write the dbg_dump header and the SRAM image to /data as two files.
+ * The caller's address limit must be saved exactly once: the previous
+ * code re-read get_fs() after set_fs(KERNEL_DS), so the user segment
+ * was never restored at 'out:'.
+ */
+static void chub_dbg_write_file(struct device *dev)
+{
+	struct file *filp;
+	char file_name[32];
+	mm_segment_t old_fs;
+	struct dbg_dump *p_dump = p_dbg_dump;
+	u32 sec = p_dump->time / NSEC_PER_SEC;
+
+	/* save the caller's fs once; both writes run under KERNEL_DS */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	snprintf(file_name, sizeof(file_name), "/data/nano-%02u-%06u.dump",
+		 p_dump->reason, sec);
+
+	filp = filp_open(file_name, O_RDWR | O_TRUNC | O_CREAT, 0660);
+
+	dev_dbg(dev, "%s is created with %d size\n", file_name,
+		get_dbg_dump_size());
+
+	if (IS_ERR(filp)) {
+		dev_warn(dev, "%s: saving log fail\n", __func__);
+		goto out;
+	}
+
+	vfs_write(filp, (void *)p_dbg_dump, sizeof(struct dbg_dump),
+		  &filp->f_pos);
+	vfs_fsync(filp, 0);
+	filp_close(filp, NULL);
+
+	snprintf(file_name, sizeof(file_name), "/data/nano-%02u-%06u-sram.dump",
+		 p_dump->reason, sec);
+
+	filp = filp_open(file_name, O_RDWR | O_TRUNC | O_CREAT, 0660);
+
+	dev_dbg(dev, "%s is created with %d size\n", file_name,
+		get_dbg_dump_size());
+
+	if (IS_ERR(filp)) {
+		dev_warn(dev, "%s: saving log fail\n", __func__);
+		goto out;
+	}
+
+	vfs_write(filp, &p_dbg_dump->sram[p_dbg_dump->sram_start],
+		  ipc_get_chub_mem_size(), &filp->f_pos);
+	vfs_fsync(filp, 0);
+	filp_close(filp, NULL);
+
+out:
+	set_fs(old_fs);
+}
+#else
+#define chub_dbg_write_file(a) do { } while (0)
+#endif
+
+/*
+ * Full HW dump: GPRs plus a copy of the chub SRAM into reserved mem,
+ * then (with CONFIG_CONTEXTHUB_DEBUG) written out to /data.
+ * NOTE(review): chub_dbg_dump_gpr() takes its own contexthub_request()
+ * inside ours -- presumably the request is refcounted; confirm.
+ */
+void chub_dbg_dump_hw(struct contexthub_ipc_info *ipc, int reason)
+{
+	int ret = contexthub_request(ipc);
+
+	if (ret) {
+		pr_err("%s: fails to contexthub_request\n", __func__);
+		return;
+	}
+
+	if (p_dbg_dump) {
+		p_dbg_dump->time = sched_clock();
+		p_dbg_dump->reason = reason;
+
+		/* dump GPR */
+		chub_dbg_dump_gpr(ipc);
+
+		/* dump SRAM */
+		memcpy_fromio(&p_dbg_dump->sram[p_dbg_dump->sram_start],
+			      ipc_get_base(IPC_REG_DUMP),
+			      ipc_get_chub_mem_size());
+
+		dev_dbg(ipc->dev, "contexthub dump is done\n");
+
+		chub_dbg_write_file(ipc->dev);
+	}
+
+	contexthub_release(ipc);
+}
+
+/*
+ * Verify the downloaded images: save the current BL region, re-download
+ * the BL and diff it word-by-word, then re-download the OS and compare
+ * against the SRAM copy previously captured in the dump buffer.
+ */
+void chub_dbg_check_and_download_image(struct contexthub_ipc_info *ipc)
+{
+	u32 *bl;
+	int ret;
+
+	/* vmalloc() can fail; the old code dereferenced it unchecked */
+	bl = vmalloc(ipc_get_offset(IPC_REG_BL));
+	if (!bl) {
+		pr_err("%s: fails to alloc bl buffer\n", __func__);
+		return;
+	}
+
+	memcpy_fromio(bl, ipc_get_base(IPC_REG_BL), ipc_get_offset(IPC_REG_BL));
+	contexthub_download_image(ipc, 1);
+
+	ret = memcmp(bl, ipc_get_base(IPC_REG_BL), ipc_get_offset(IPC_REG_BL));
+	if (ret) {
+		int i;
+		u32 *bl_image = (u32 *)ipc_get_base(IPC_REG_BL);
+
+		pr_info("bl doesn't match with size %d\n", ipc_get_offset(IPC_REG_BL));
+
+		/* report only the first mismatching word */
+		for (i = 0; i < ipc_get_offset(IPC_REG_BL) / 4; i++)
+			if (bl[i] != bl_image[i]) {
+				pr_info("bl[%d] %x -> wrong %x\n", i,
+					bl_image[i], bl[i]);
+				break;
+			}
+	}
+	contexthub_download_image(ipc, 0);
+
+	/* os image is dumped on &p_dbg_dump->sram[p_dbg_dump->sram_start] */
+	ret = memcmp(&p_dbg_dump->sram[p_dbg_dump->sram_start],
+		     ipc_get_base(IPC_REG_OS), ipc_get_offset(IPC_REG_OS));
+
+	if (ret)
+		pr_info("os doesn't match with size %d\n",
+			ipc_get_offset(IPC_REG_OS));
+
+	vfree(bl);
+}
+
+/*
+ * Print driver-side state (wakeup/irq counters, per-channel receive
+ * bookkeeping, error counters), then ask the firmware to dump its own
+ * status via a debug event and flush the firmware log.
+ */
+void chub_dbg_dump_status(struct contexthub_ipc_info *ipc)
+{
+	int val;
+	struct nanohub_data *data = ipc->data;
+
+	CSP_PRINTF_INFO
+	    ("CHUB DUMP: nanohub driver status\nwu:%d wu_l:%d acq:%d irq1_apInt:%d fired:%d\n",
+	     atomic_read(&data->wakeup_cnt),
+	     atomic_read(&data->wakeup_lock_cnt),
+	     atomic_read(&data->wakeup_acquired),
+	     atomic_read(&ipc->irq1_apInt), nanohub_irq1_fired(data));
+
+	if (!contexthub_is_run(ipc)) {
+		pr_warn("%s: chub isn't run\n", __func__);
+		return;
+	}
+
+#ifndef USE_IPC_BUF
+	CSP_PRINTF_INFO
+	    ("CHUB DUMP: contexthub driver status\nflag:%x cnt:%d, order:%lu\nalive container:\n",
+	     ipc->read_lock.flag, atomic_read(&ipc->read_lock.cnt),
+	     ipc->recv_order.order);
+	for (val = 0; val < IRQ_EVT_CH_MAX; val++)
+		if (ipc->recv_order.container[val])
+			CSP_PRINTF_INFO("container[%d]:%lu\n", val,
+					ipc->recv_order.container[val]);
+#endif
+	for (val = 0; val < CHUB_ERR_MAX; val++)
+		if (ipc->err_cnt[val])
+			CSP_PRINTF_INFO("error %d occurs %d times\n",
+					val, ipc->err_cnt[val]);
+	ipc_dump();
+	/* dump nanohub kernel status */
+	CSP_PRINTF_INFO("CHUB DUMP: Request to dump nanohub kernel status\n");
+	ipc_write_debug_event(AP, (u32)MAILBOX_EVT_DUMP_STATUS);
+	ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_DEBUG);
+	log_flush(ipc->fw_log);
+}
+
+/* sysfs bin read of live chub SRAM; only valid while the chub runs */
+static ssize_t chub_bin_sram_read(struct file *file, struct kobject *kobj,
+				  struct bin_attribute *battr, char *buf,
+				  loff_t off, size_t size)
+{
+	struct device *dev = kobj_to_dev(kobj);
+
+	dev_dbg(dev, "%s(%lld, %zu)\n", __func__, off, size);
+
+	if (contexthub_is_run(dev_get_drvdata(dev))) {
+		memcpy_fromio(buf, battr->private + off, size);
+		return size;
+	}
+
+	pr_warn("%s: chub isn't run\n", __func__);
+	return -EINVAL;
+}
+
+/* sysfs bin read of the DRAM dump buffer; plain memory, no run check */
+static ssize_t chub_bin_dram_read(struct file *file, struct kobject *kobj,
+				  struct bin_attribute *battr, char *buf,
+				  loff_t off, size_t size)
+{
+	void *src = battr->private + off;
+
+	dev_dbg(kobj_to_dev(kobj), "%s(%lld, %zu)\n", __func__, off, size);
+	memcpy(buf, src, size);
+	return size;
+}
+
+/* read-only binary attributes; sizes/backing set in chub_dbg_init() */
+static BIN_ATTR_RO(chub_bin_sram, 0);
+static BIN_ATTR_RO(chub_bin_dram, 0);
+
+static struct bin_attribute *chub_bin_attrs[] = {
+	&bin_attr_chub_bin_sram,
+	&bin_attr_chub_bin_dram,
+};
+
+#define SIZE_UTC_NAME (16)
+
+/*
+ * Display names for the firmware UTC debug commands (indexed by the
+ * IPC_DEBUG_UTC_* codes).  The duplicate [IPC_DEBUG_UTC_GPIO] entry,
+ * which GCC flags as an overridden designated initializer, is removed.
+ */
+char chub_utc_name[][SIZE_UTC_NAME] = {
+	[IPC_DEBUG_UTC_STOP] = "stop",
+	[IPC_DEBUG_UTC_AGING] = "aging",
+	[IPC_DEBUG_UTC_WDT] = "wdt",
+	[IPC_DEBUG_UTC_RTC] = "rtc",
+	[IPC_DEBUG_UTC_TIMER] = "timer",
+	[IPC_DEBUG_UTC_MEM] = "mem",
+	[IPC_DEBUG_UTC_GPIO] = "gpio",
+	[IPC_DEBUG_UTC_SPI] = "spi",
+	[IPC_DEBUG_UTC_CMU] = "cmu",
+	[IPC_DEBUG_UTC_TIME_SYNC] = "time_sync",
+	[IPC_DEBUG_UTC_ASSERT] = "assert",
+	[IPC_DEBUG_UTC_FAULT] = "fault",
+	[IPC_DEBUG_UTC_CHECK_STATUS] = "stack",
+	[IPC_DEBUG_UTC_CHECK_CPU_UTIL] = "utilization",
+	[IPC_DEBUG_UTC_HEAP_DEBUG] = "heap",
+	[IPC_DEBUG_NANOHUB_CHUB_ALIVE] = "alive",
+};
+
+/* sysfs 'utc' show: list every named UTC command as "<id> <name>" */
+static ssize_t chub_utc_show(struct device *kobj,
+			     struct device_attribute *attr, char *buf)
+{
+	size_t total = sizeof(chub_utc_name) / SIZE_UTC_NAME;
+	int len = 0;
+	size_t idx;
+
+	for (idx = 0; idx < total; idx++) {
+		if (!chub_utc_name[idx][0])
+			continue;
+		len += sprintf(buf + len, "%d %s\n", (int)idx,
+			       chub_utc_name[idx]);
+	}
+
+	return len;
+}
+
+/*
+ * sysfs 'utc' store: run the UTC debug command given as a decimal id.
+ * Returns the kstrtol error on bad input -- the old code returned 0,
+ * which makes userspace's write() retry the same buffer forever.
+ */
+static ssize_t chub_utc_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
+	long event;
+	int err;
+
+	err = kstrtol(&buf[0], 10, &event);
+	if (err)
+		return err;
+
+	if (event == IPC_DEBUG_NANOHUB_CHUB_ALIVE)
+		event = MAILBOX_EVT_CHUB_ALIVE;
+
+	/* alive checks work without holding a contexthub request */
+	if (event != MAILBOX_EVT_CHUB_ALIVE) {
+		err = contexthub_request(ipc);
+		if (err)
+			pr_err("%s: fails to request contexthub. ret:%d\n",
+			       __func__, err);
+	}
+
+	contexthub_ipc_write_event(ipc, event);
+	if (event != MAILBOX_EVT_CHUB_ALIVE)
+		contexthub_release(ipc);
+
+	return count;
+}
+
+/*
+ * sysfs 'ipc_test' store: loop the written buffer through the firmware
+ * (write then read back) and compare.  All failure paths after
+ * contexthub_request() now go through 'out' so the request is always
+ * released -- the old write/read/compare failures returned early and
+ * leaked it.
+ */
+static ssize_t chub_ipc_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
+	char input[PACKET_SIZE_MAX];
+	char output[PACKET_SIZE_MAX];
+	int ret;
+
+	if (count > PACKET_SIZE_MAX) {
+		pr_err("%s: ipc size(%d) is bigger than max(%d)\n",
+		       __func__, (int)count, (int)PACKET_SIZE_MAX);
+		return -EINVAL;
+	}
+
+	memset(input, 0, PACKET_SIZE_MAX);
+	memset(output, 0, PACKET_SIZE_MAX);
+	memcpy(input, buf, count);
+
+	ret = contexthub_request(ipc);
+	if (ret) {
+		pr_err("%s: fails to request contexthub. ret:%d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = contexthub_ipc_write_event(ipc, (u32)IPC_DEBUG_UTC_IPC_TEST_START);
+	if (ret) {
+		pr_err("%s: fails to set start test event. ret:%d\n", __func__, ret);
+		count = ret;
+		goto out;
+	}
+
+	ret = contexthub_ipc_write(ipc, input, count, IPC_MAX_TIMEOUT);
+	if (ret != count) {
+		pr_info("%s: fail to write\n", __func__);
+		count = -EINVAL;
+		goto out;
+	}
+
+	ret = contexthub_ipc_read(ipc, output, 0, IPC_MAX_TIMEOUT);
+	if (count != ret) {
+		pr_info("%s: fail to read ret:%d\n", __func__, ret);
+		count = -EINVAL;
+		goto out;
+	}
+
+	if (strncmp(input, output, count)) {
+		pr_info("%s: fail to compare input/output\n", __func__);
+		print_hex_dump(KERN_CONT, "chub input:",
+			       DUMP_PREFIX_OFFSET, 16, 1, input,
+			       count, false);
+		print_hex_dump(KERN_CONT, "chub output:",
+			       DUMP_PREFIX_OFFSET, 16, 1, output,
+			       count, false);
+		count = -EINVAL;
+		goto out;
+	}
+
+	ret = contexthub_ipc_write_event(ipc, (u32)IPC_DEBUG_UTC_IPC_TEST_END);
+	if (ret) {
+		pr_err("%s: fails to set end test event. ret:%d\n", __func__, ret);
+		count = ret;
+	} else {
+		pr_info("[%s pass] len:%d, str: %s\n", __func__, (int)count, output);
+	}
+
+out:
+	contexthub_release(ipc);
+
+	return count;
+}
+
+/*
+ * sysfs 'dump_status' store: ask the firmware to dump its status.
+ * Propagates the contexthub_request() error instead of returning 0,
+ * which would make userspace's write() retry forever.
+ */
+static ssize_t chub_get_dump_status_store(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t count)
+{
+	struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
+	int ret = contexthub_request(ipc);
+
+	if (ret) {
+		pr_err("%s: fails to contexthub_request\n", __func__);
+		return ret;
+	}
+
+	contexthub_ipc_write_event(ipc, MAILBOX_EVT_DUMP_STATUS);
+
+	contexthub_release(ipc);
+	return count;
+}
+
+/*
+ * sysfs 'get_gpr' show: refresh the GPR snapshot and print R0..R15 and
+ * PC from the reserved-mem dump buffer.  Empty output when no reserved
+ * memory was configured (p_dbg_dump == NULL).
+ */
+static ssize_t chub_get_gpr_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
+	char *pbuf = buf;
+	int i;
+
+	if (p_dbg_dump) {
+		chub_dbg_dump_gpr(ipc);
+
+		pbuf +=
+		    sprintf(pbuf, "========================================\n");
+		pbuf += sprintf(pbuf, "CHUB CPU register dump\n");
+
+		for (i = 0; i <= 15; i++)
+			pbuf +=
+			    sprintf(pbuf, "R%02d : %08x\n", i,
+				    p_dbg_dump->gpr[i]);
+
+		pbuf +=
+		    sprintf(pbuf, "PC : %08x\n",
+			    p_dbg_dump->gpr[GPR_PC_INDEX]);
+		pbuf +=
+		    sprintf(pbuf, "========================================\n");
+	}
+
+	return pbuf - buf;
+}
+
+/* sysfs 'dump_hw' store: trigger a full GPR+SRAM dump, reason code 0 */
+static ssize_t chub_set_dump_hw_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	chub_dbg_dump_hw(dev_get_drvdata(dev), 0);
+	return count;
+}
+
+/* sysfs 'wakeup' store: nonzero takes a contexthub request, 0 drops it */
+static ssize_t chub_wakeup_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
+	long request;
+	int err;
+
+	err = kstrtol(&buf[0], 10, &request);
+	if (err)
+		return err;
+
+	if (request)
+		err = contexthub_request(ipc);
+	else
+		contexthub_release(ipc);
+
+	return err ? err : count;
+}
+
+/* debug sysfs attributes registered in chub_dbg_init() */
+static struct device_attribute attributes[] = {
+	__ATTR(get_gpr, 0440, chub_get_gpr_show, NULL),
+	__ATTR(dump_status, 0220, NULL, chub_get_dump_status_store),
+	__ATTR(dump_hw, 0220, NULL, chub_set_dump_hw_store),
+	__ATTR(utc, 0664, chub_utc_show, chub_utc_store),
+	__ATTR(ipc_test, 0220, NULL, chub_ipc_store),
+	__ATTR(wakeup, 0220, NULL, chub_wakeup_store),
+};
+
+/*
+ * Hand out a zeroed sub-area of the reserved-mem dump buffer.  Only
+ * DBG_NANOHUB_DD_AREA (the persistent driver-state slot) is supported;
+ * returns NULL when no reserved memory was declared in DT or for any
+ * other area.
+ */
+void *chub_dbg_get_memory(enum dbg_dump_area area)
+{
+	void *addr;
+	int size;
+
+	pr_info("%s: chub_rmem: %p\n", __func__, chub_rmem);
+
+	if (!chub_rmem)
+		return NULL;
+
+	if (area == DBG_NANOHUB_DD_AREA) {
+		addr = &p_dbg_dump->chub;
+		size = sizeof(p_dbg_dump->chub);
+	} else {
+		return NULL;
+	}
+
+	memset(addr, 0, size);
+
+	return addr;
+}
+
+/*
+ * Debug init: wire the sram/dram binary attributes to the live SRAM
+ * mapping and the reserved-mem dump buffer, create the debug sysfs
+ * files, and record the layout (name/offset/size) of each dump area
+ * so ramdump tooling can locate them.  Requires chub_rmem from the
+ * reserved-mem setup callback.
+ */
+int chub_dbg_init(struct device *dev)
+{
+	int i, ret = 0;
+	enum dbg_dump_area area;
+
+	if (!chub_rmem)
+		return -EINVAL;
+
+	bin_attr_chub_bin_sram.size = ipc_get_chub_mem_size();
+	bin_attr_chub_bin_sram.private = ipc_get_base(IPC_REG_DUMP);
+
+	bin_attr_chub_bin_dram.size = sizeof(struct dbg_dump);
+	bin_attr_chub_bin_dram.private = p_dbg_dump;
+
+	/* undersized rmem is reported but not fatal */
+	if (chub_rmem->size < get_dbg_dump_size())
+		dev_err(dev,
+			"rmem size (%u) should be bigger than dump size(%u)\n",
+			(u32)chub_rmem->size, get_dbg_dump_size());
+
+	for (i = 0; i < ARRAY_SIZE(chub_bin_attrs); i++) {
+		struct bin_attribute *battr = chub_bin_attrs[i];
+
+		ret = device_create_bin_file(dev, battr);
+		if (ret < 0)
+			dev_warn(dev, "Failed to create file: %s\n",
+				 battr->attr.name);
+	}
+
+	for (i = 0, ret = 0; i < ARRAY_SIZE(attributes); i++) {
+		ret = device_create_file(dev, &attributes[i]);
+		if (ret)
+			dev_warn(dev, "Failed to create file: %s\n",
+				 attributes[i].attr.name);
+	}
+
+	area = DBG_IPC_AREA;
+	strncpy(p_dbg_dump->info[area].name, "ipc_map", AREA_NAME_MAX);
+	p_dbg_dump->info[area].offset =
+	    (void *)p_dbg_dump->ipc_addr - (void *)p_dbg_dump;
+	p_dbg_dump->info[area].size = sizeof(struct ipc_area) * IPC_REG_MAX;
+
+	area = DBG_NANOHUB_DD_AREA;
+	strncpy(p_dbg_dump->info[area].name, "nano_dd", AREA_NAME_MAX);
+	p_dbg_dump->info[area].offset =
+	    (void *)&p_dbg_dump->chub - (void *)p_dbg_dump;
+	p_dbg_dump->info[area].size = sizeof(struct contexthub_ipc_info);
+
+	area = DBG_GPR_AREA;
+	strncpy(p_dbg_dump->info[area].name, "gpr", AREA_NAME_MAX);
+	p_dbg_dump->info[area].offset =
+	    (void *)p_dbg_dump->gpr - (void *)p_dbg_dump;
+	p_dbg_dump->info[area].size = sizeof(u32) * NUM_OF_GPR;
+
+	area = DBG_SRAM_AREA;
+	/* align the chub sram dump base address on rmem into SRAM_ALIN */
+	p_dbg_dump->sram_start = SRAM_ALIGN - bin_attr_chub_bin_dram.size;
+	if (p_dbg_dump->sram_start < 0) {
+		dev_warn(dev,
+			 "increase SRAM_ALIGN from %d to %d to align on ramdump.\n",
+			 SRAM_ALIGN, (u32)bin_attr_chub_bin_dram.size);
+		p_dbg_dump->sram_start = 0;
+	}
+	strncpy(p_dbg_dump->info[area].name, "sram", AREA_NAME_MAX);
+	p_dbg_dump->info[area].offset =
+	    (void *)&p_dbg_dump->sram[p_dbg_dump->sram_start] -
+	    (void *)p_dbg_dump;
+	p_dbg_dump->info[area].size = bin_attr_chub_bin_sram.size;
+
+	dev_dbg(dev,
+		"%s(%pa) is mapped on %p (sram %p) with size of %u, dump size %u\n",
+		"dump buffer", &chub_rmem->base, phys_to_virt(chub_rmem->base),
+		&p_dbg_dump->sram[p_dbg_dump->sram_start],
+		(u32)chub_rmem->size, get_dbg_dump_size());
+
+	return ret;
+}
+
+/*
+ * Reserved-mem early-init callback for "exynos,chub_rmem": record the
+ * region and map the dump buffer at its base via the lowmem mapping.
+ */
+static int __init contexthub_rmem_setup(struct reserved_mem *rmem)
+{
+	pr_info("%s: base=%pa, size=%pa\n", __func__, &rmem->base, &rmem->size);
+
+	chub_rmem = rmem;
+	p_dbg_dump = phys_to_virt(rmem->base);
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(chub_rmem, "exynos,chub_rmem", contexthub_rmem_setup);
--- /dev/null
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __CHUB_DEBUG_H
+#define __CHUB_DEBUG_H
+
+#include <linux/platform_device.h>
+#include "chub.h"
+
+enum dbg_dump_area {
+ DBG_NANOHUB_DD_AREA,
+ DBG_IPC_AREA,
+ DBG_GPR_AREA,
+ DBG_SRAM_AREA,
+ DBG_AREA_MAX
+};
+
+int chub_dbg_init(struct device *dev);
+void *chub_dbg_get_memory(enum dbg_dump_area area);
+void chub_dbg_dump_hw(struct contexthub_ipc_info *ipc, int reason);
+void chub_dbg_dump_status(struct contexthub_ipc_info *ipc);
+void chub_dbg_check_and_download_image(struct contexthub_ipc_info *ipc);
+#endif /* __CHUB_DEBUG_H */
--- /dev/null
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Boojin Kim <boojin.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "chub_ipc.h"
+
+#if defined(CHUB_IPC)
+#if defined(SEOS)
+#include <seos.h>
+#include <errno.h>
+#elif defined(EMBOS)
+#include <Device.h>
+#endif
+#include <mailboxDrv.h>
+#include <csp_common.h>
+#include <csp_printf.h>
+#include <string.h>
+#include <string.h>
+#elif defined(AP_IPC)
+#include <linux/delay.h>
+#include <linux/io.h>
+#include "chub.h"
+#endif
+
+/* ap-chub ipc */
+struct ipc_area ipc_addr[IPC_REG_MAX];
+
+struct ipc_owner_ctrl {
+ enum ipc_direction src;
+ void *base;
+} ipc_own[IPC_OWN_MAX];
+
+struct ipc_map_area *ipc_map;
+
+#ifdef PACKET_LOW_DEBUG
+#define GET_IPC_REG_STRING(a) (((a) == IPC_REG_IPC_C2A) ? "wt" : "rd")
+
+/* short display tag for a channel_status value (NULL for CS_MAX/unknown) */
+static char *get_cs_name(enum channel_status cs)
+{
+	if (cs == CS_IDLE)
+		return "I";
+	if (cs == CS_AP_WRITE)
+		return "AW";
+	if (cs == CS_CHUB_RECV)
+		return "CR";
+	if (cs == CS_CHUB_WRITE)
+		return "CW";
+	if (cs == CS_AP_RECV)
+		return "AR";
+	return NULL;
+}
+
+/* low-level packet trace: print one IPC content descriptor */
+void content_disassemble(struct ipc_content *content, enum ipc_region act)
+{
+	CSP_PRINTF_INFO("[content-%s-%d: status:%s: buf: 0x%x, size: %d]\n",
+			GET_IPC_REG_STRING(act), content->num,
+			get_cs_name(content->status),
+			(unsigned int)content->buf, content->size);
+}
+#endif
+
+/* ipc address control functions */
+/* anchor the IPC region table at the SRAM (bootloader) base address */
+void ipc_set_base(void *addr)
+{
+	ipc_addr[IPC_REG_BL].base = addr;
+}
+
+/* base address of an IPC region (as recorded in ipc_addr[]) */
+inline void *ipc_get_base(enum ipc_region area)
+{
+	return ipc_addr[area].base;
+}
+
+/* size (offset field) of an IPC region */
+inline u32 ipc_get_offset(enum ipc_region area)
+{
+	return ipc_addr[area].offset;
+}
+
+/*
+ * Address of buffer 'buf_num' within a region (base + size * buf_num).
+ * On the firmware (CHUB_IPC) side pointers go through a 32-bit cast --
+ * presumably that CPU is 32-bit; the AP side uses plain pointer math.
+ */
+inline void *ipc_get_addr(enum ipc_region area, int buf_num)
+{
+#ifdef CHUB_IPC
+	return (void *)((unsigned int)ipc_addr[area].base +
+			ipc_addr[area].offset * buf_num);
+#else
+	return ipc_addr[area].base + ipc_addr[area].offset * buf_num;
+#endif
+}
+
+/* total chub memory size, taken from the DUMP region's recorded size */
+u32 ipc_get_chub_mem_size(void)
+{
+	return ipc_addr[IPC_REG_DUMP].offset;
+}
+
+void ipc_set_chub_clk(u32 clk)
+{
+	/* Publish the CHUB clock rate into the shared bootargs block. */
+	struct chub_bootargs *args = ipc_get_base(IPC_REG_BL_MAP);
+
+	args->chubclk = clk;
+}
+
+u32 ipc_get_chub_clk(void)
+{
+	/* CHUB clock rate as recorded in the shared bootargs block. */
+	const struct chub_bootargs *args = ipc_get_base(IPC_REG_BL_MAP);
+
+	return args->chubclk;
+}
+
+void ipc_set_chub_bootmode(u32 bootmode)
+{
+	/* Publish the boot mode (cold / power-gating) to the bootargs. */
+	struct chub_bootargs *args = ipc_get_base(IPC_REG_BL_MAP);
+
+	args->bootmode = bootmode;
+}
+
+u32 ipc_get_chub_bootmode(void)
+{
+	/* Boot mode currently advertised in the shared bootargs block. */
+	const struct chub_bootargs *args = ipc_get_base(IPC_REG_BL_MAP);
+
+	return args->bootmode;
+}
+
+#if defined(LOCAL_POWERGATE)
+u32 *ipc_get_chub_psp(void)
+{
+	/* Location of the saved process stack pointer in the bootargs. */
+	struct chub_bootargs *args = ipc_get_base(IPC_REG_BL_MAP);
+
+	return &args->psp;
+}
+
+u32 *ipc_get_chub_msp(void)
+{
+	/* Location of the saved main stack pointer in the bootargs. */
+	struct chub_bootargs *args = ipc_get_base(IPC_REG_BL_MAP);
+
+	return &args->msp;
+}
+#endif
+
+/*
+ * Parse the bootargs block at MAP_INFO_OFFSET in CHUB SRAM, validate its
+ * magic and IPC layout version, then fill the global ipc_addr[] table
+ * with the base/size of every region and wire up the ipc_map sub-areas.
+ * Returns the ipc_map pointer, or 0 on a magic/version mismatch.
+ */
+void *ipc_get_chub_map(void)
+{
+ char *sram_base = ipc_get_base(IPC_REG_BL);
+ struct chub_bootargs *map = (struct chub_bootargs *)(sram_base + MAP_INFO_OFFSET);
+
+ /* reject images that were not produced by a matching OS build */
+ if (strncmp(OS_UPDT_MAGIC, map->magic, sizeof(OS_UPDT_MAGIC))) {
+ CSP_PRINTF_ERROR("%s: %p has wrong magic key: %s -> %s\n",
+ __func__, map, OS_UPDT_MAGIC, map->magic);
+ return 0;
+ }
+
+ /* both sides must agree on the shared-structure layout version */
+ if (map->ipc_version != IPC_VERSION) {
+ CSP_PRINTF_ERROR
+ ("%s: ipc_version doesn't match: AP %d, Chub: %d\n",
+ __func__, IPC_VERSION, map->ipc_version);
+ return 0;
+ }
+
+ /* translate the start/end offsets from the bootargs into base+size */
+ ipc_addr[IPC_REG_BL_MAP].base = map;
+ ipc_addr[IPC_REG_OS].base = sram_base + map->code_start;
+ ipc_addr[IPC_REG_SHARED].base = sram_base + map->shared_start;
+ ipc_addr[IPC_REG_IPC].base = sram_base + map->ipc_start;
+ ipc_addr[IPC_REG_RAM].base = sram_base + map->ram_start;
+ ipc_addr[IPC_REG_DUMP].base = sram_base + map->dump_start;
+ ipc_addr[IPC_REG_BL].offset = map->bl_end - map->bl_start;
+ ipc_addr[IPC_REG_OS].offset = map->code_end - map->code_start;
+ ipc_addr[IPC_REG_SHARED].offset = map->shared_end - map->shared_start;
+ ipc_addr[IPC_REG_IPC].offset = map->ipc_end - map->ipc_start;
+ ipc_addr[IPC_REG_RAM].offset = map->ram_end - map->ram_start;
+ ipc_addr[IPC_REG_DUMP].offset = map->dump_end - map->dump_start;
+
+ /* the log ring buffer takes whatever the ipc_map_area leaves over */
+ ipc_map = ipc_addr[IPC_REG_IPC].base;
+ ipc_map->logbuf.size =
+ ipc_addr[IPC_REG_IPC].offset - sizeof(struct ipc_map_area);
+
+ /* sub-areas inside ipc_map (event queues, data queues, log) */
+ ipc_addr[IPC_REG_IPC_EVT_A2C].base = &ipc_map->evt[IPC_EVT_A2C].data;
+ ipc_addr[IPC_REG_IPC_EVT_A2C].offset = 0;
+ ipc_addr[IPC_REG_IPC_EVT_A2C_CTRL].base =
+ &ipc_map->evt[IPC_EVT_A2C].ctrl;
+ ipc_addr[IPC_REG_IPC_EVT_A2C_CTRL].offset = 0;
+ ipc_addr[IPC_REG_IPC_EVT_C2A].base = &ipc_map->evt[IPC_EVT_C2A].data;
+ ipc_addr[IPC_REG_IPC_EVT_C2A].offset = 0;
+ ipc_addr[IPC_REG_IPC_EVT_C2A_CTRL].base =
+ &ipc_map->evt[IPC_EVT_C2A].ctrl;
+ ipc_addr[IPC_REG_IPC_EVT_C2A_CTRL].offset = 0;
+ ipc_addr[IPC_REG_IPC_C2A].base = &ipc_map->data[IPC_DATA_C2A];
+ ipc_addr[IPC_REG_IPC_A2C].base = &ipc_map->data[IPC_DATA_A2C];
+#ifdef USE_IPC_BUF
+ ipc_addr[IPC_REG_IPC_C2A].offset = sizeof(struct ipc_buf);
+ ipc_addr[IPC_REG_IPC_A2C].offset = sizeof(struct ipc_buf);
+#else
+ ipc_addr[IPC_REG_IPC_C2A].offset = sizeof(struct ipc_content);
+ ipc_addr[IPC_REG_IPC_A2C].offset = sizeof(struct ipc_content);
+#endif
+
+ ipc_addr[IPC_REG_LOG].base = &ipc_map->logbuf.buf;
+ ipc_addr[IPC_REG_LOG].offset =
+ ipc_addr[IPC_REG_IPC].offset - sizeof(struct ipc_map_area);
+
+#ifdef CHUB_IPC
+ /* firmware side owns the log buffer: start it out clean */
+ ipc_map->logbuf.token = 0;
+ memset(ipc_addr[IPC_REG_LOG].base, 0, ipc_addr[IPC_REG_LOG].offset);
+#endif
+
+ CSP_PRINTF_INFO
+ ("contexthub map information(v%u)\n\tbl(%p %d)\n\tos(%p %d)\n\tipc(%p %d)\n\tram(%p %d)\n\tshared(%p %d)\n\tdump(%p %d)\n",
+ map->ipc_version,
+ ipc_addr[IPC_REG_BL].base, ipc_addr[IPC_REG_BL].offset,
+ ipc_addr[IPC_REG_OS].base, ipc_addr[IPC_REG_OS].offset,
+ ipc_addr[IPC_REG_IPC].base, ipc_addr[IPC_REG_IPC].offset,
+ ipc_addr[IPC_REG_RAM].base, ipc_addr[IPC_REG_RAM].offset,
+ ipc_addr[IPC_REG_SHARED].base, ipc_addr[IPC_REG_SHARED].offset,
+ ipc_addr[IPC_REG_DUMP].base, ipc_addr[IPC_REG_DUMP].offset);
+
+ return ipc_map;
+}
+
+void ipc_dump(void)
+{
+	/* Print both event queues, then whichever data path is built in. */
+	CSP_PRINTF_INFO("%s: a2x event\n", __func__);
+	ipc_print_evt(IPC_EVT_A2C);
+	CSP_PRINTF_INFO("%s: c2a event\n", __func__);
+	ipc_print_evt(IPC_EVT_C2A);
+
+#ifdef USE_IPC_BUF
+	CSP_PRINTF_INFO("%s: data buffer\n", __func__);
+	ipc_print_databuf();
+#else
+	CSP_PRINTF_INFO("%s: active channel\n", __func__);
+	ipc_print_channel();
+#endif
+}
+
+#ifdef USE_IPC_BUF
+inline void ipc_copy_bytes(u8 *dst, u8 *src, int size)
+{
+	/* Open-coded byte-wise copy between the queue and SRAM buffers. */
+	while (size-- > 0)
+		*dst++ = *src++;
+}
+
+/* NOTE(review): INC_QIDX is defined but unused below; the queue indices
+ * wrap manually at each step instead.
+ */
+#define INC_QIDX(i) (((i) == IPC_DATA_SIZE) ? 0 : (i))
+/*
+ * Shared ring-buffer transfer. length > 0 writes 'buf' into the queue
+ * (2-byte little-endian size header followed by the payload, both of
+ * which may wrap); length == 0 reads the next packet into 'buf'.
+ * Returns 0 on a successful write, the packet size on a successful
+ * read, 0 on an empty/short queue, and -1 on error.
+ */
+static inline int ipc_io_data(enum ipc_data_list dir, u8 *buf, u16 length)
+{
+ struct ipc_buf *ipc_data;
+ int eq;
+ int dq;
+ int useful = 0;
+ u8 size_lower;
+ u8 size_upper;
+ u16 size_to_read;
+ u32 size_to_copy_top;
+ u32 size_to_copy_bottom;
+ enum ipc_region reg;
+
+ /* get ipc region */
+ if (dir == IPC_DATA_C2A)
+ reg = IPC_REG_IPC_C2A;
+ else if (dir == IPC_DATA_A2C)
+ reg = IPC_REG_IPC_A2C;
+ else {
+ CSP_PRINTF_ERROR("%s: invalid dir:%d\n", __func__, dir);
+ return -1;
+ }
+
+ /* get ipc_data base */
+ ipc_data = ipc_get_base(reg);
+ /* snapshot the volatile indices once; only this side's index is
+ * written back at the end
+ */
+ eq = ipc_data->eq;
+ dq = ipc_data->dq;
+
+#ifdef USE_IPC_BUF_LOG
+ CSP_PRINTF_INFO("%s: dir:%s(w:%d, r:%d, cnt:%d), e:%d d:%d, empty:%d, full:%d, ipc_data:%p, len:%d\n",
+ __func__, dir ? "a2c" : "c2a", ipc_data->cnt_dbg_wt,
+ ipc_data->cnt_dbg_rd, ipc_data->cnt, eq, dq, ipc_data->empty,
+ ipc_data->full, ipc_data, length);
+#endif
+
+ if (length) {
+ /* write data */
+ /* calc the unused area on ipc buffer */
+ if (eq > dq)
+ useful = dq + (IPC_DATA_SIZE - eq);
+ else if (eq < dq)
+ useful = dq - eq;
+ else if (ipc_data->full) {
+ CSP_PRINTF_ERROR("%s is full\n", __func__);
+ return -1;
+ } else {
+ useful = IPC_DATA_SIZE;
+ }
+
+#ifdef USE_IPC_BUF_LOG
+ ipc_data->cnt_dbg_wt++;
+ CSP_PRINTF_INFO("w: eq:%d, dq:%d, useful:%d\n", eq, dq, useful);
+#endif
+ /* check length */
+ /* header (2 bytes) + payload must fit in the free space */
+ if (length + sizeof(u16) > useful) {
+ CSP_PRINTF_ERROR
+ ("%s: no buffer. len:%d, remain:%d, eq:%d, dq:%d\n",
+ __func__, length, useful, eq, dq);
+ return -1;
+ }
+
+ /* 16-bit size header, low byte first; each byte may wrap */
+ size_upper = (length >> 8);
+ size_lower = length & 0xff;
+ ipc_data->buf[eq++] = size_lower;
+
+ /* write size */
+ if (eq == IPC_DATA_SIZE)
+ eq = 0;
+
+ ipc_data->buf[eq++] = size_upper;
+ if (eq == IPC_DATA_SIZE)
+ eq = 0;
+
+ /* write data */
+ /* payload may wrap once: split into top and bottom copies */
+ if (eq + length > IPC_DATA_SIZE) {
+ size_to_copy_top = IPC_DATA_SIZE - eq;
+ size_to_copy_bottom = length - size_to_copy_top;
+ ipc_copy_bytes(&ipc_data->buf[eq], buf, size_to_copy_top);
+ ipc_copy_bytes(&ipc_data->buf[0], &buf[size_to_copy_top], size_to_copy_bottom);
+ eq = size_to_copy_bottom;
+ } else {
+ ipc_copy_bytes(&ipc_data->buf[eq], buf, length);
+ eq += length;
+ if (eq == IPC_DATA_SIZE)
+ eq = 0;
+ }
+
+ /* update queue index */
+ ipc_data->eq = eq;
+
+ /* eq catching dq after a write means completely full */
+ if (ipc_data->eq == ipc_data->dq)
+ ipc_data->full = 1;
+
+ if (ipc_data->empty)
+ ipc_data->empty = 0;
+
+#ifdef USE_IPC_BUF_LOG
+ CSP_PRINTF_INFO("w_out: eq:%d, dq:%d, f:%d, e:%d\n",
+ ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
+#endif
+ return 0;
+ } else {
+ /* read data */
+ /* calc the unused area on ipc buffer */
+ if (eq > dq)
+ useful = eq - dq;
+ else if (eq < dq)
+ useful = (IPC_DATA_SIZE - dq) + eq;
+ else if (ipc_data->empty) {
+ CSP_PRINTF_ERROR("%s is empty\n", __func__);
+ return 0;
+ } else {
+ useful = IPC_DATA_SIZE;
+ }
+
+ /* read size */
+ size_lower = ipc_data->buf[dq++];
+ if (dq == IPC_DATA_SIZE)
+ dq = 0;
+
+ size_upper = ipc_data->buf[dq++];
+ if (dq == IPC_DATA_SIZE)
+ dq = 0;
+
+ /* sanity-check the decoded packet length */
+ size_to_read = (size_upper << 8) | size_lower;
+ if (size_to_read >= PACKET_SIZE_MAX) {
+ CSP_PRINTF_ERROR("%s: wrong size:%d\n",
+ __func__, size_to_read);
+ return -1;
+ }
+
+#ifdef USE_IPC_BUF_LOG
+ ipc_data->cnt_dbg_rd++;
+ CSP_PRINTF_INFO("r: eq:%d, dq:%d, useful:%d, size_to_read:%d\n",
+ eq, dq, useful, size_to_read);
+#endif
+
+ /* NOTE(review): dq already advanced past the header here but is
+ * not written back on this early return — verify the caller
+ * retries cleanly in this case.
+ */
+ if (useful < sizeof(u16) + size_to_read) {
+ CSP_PRINTF_ERROR("%s: no enought read size: useful:%d, read_to_size:%d,%d\n",
+ __func__, useful, size_to_read, sizeof(u16));
+ return 0;
+ }
+
+ /* read data */
+ /* payload may wrap once: copy top of ring, then bottom */
+ if (dq + size_to_read > IPC_DATA_SIZE) {
+ size_to_copy_top = IPC_DATA_SIZE - dq;
+ size_to_copy_bottom = size_to_read - size_to_copy_top;
+
+ ipc_copy_bytes(buf, &ipc_data->buf[dq], size_to_copy_top);
+ ipc_copy_bytes(&buf[size_to_copy_top], &ipc_data->buf[0], size_to_copy_bottom);
+ dq = size_to_copy_bottom;
+ } else {
+ ipc_copy_bytes(buf, &ipc_data->buf[dq], size_to_read);
+
+ dq += size_to_read;
+ if (dq == IPC_DATA_SIZE)
+ dq = 0;
+ }
+
+ /* update queue index */
+ ipc_data->dq = dq;
+ if (ipc_data->eq == ipc_data->dq)
+ ipc_data->empty = 1;
+
+ if (ipc_data->full)
+ ipc_data->full = 0;
+
+#ifdef USE_IPC_BUF_LOG
+ CSP_PRINTF_INFO("r_out (read_to_size:%d): eq:%d, dq:%d, f:%d, e:%d\n",
+ size_to_read, ipc_data->eq, ipc_data->dq, ipc_data->full, ipc_data->empty);
+#endif
+ return size_to_read;
+ }
+}
+
+int ipc_write_data(enum ipc_data_list dir, void *tx, u16 length)
+{
+	/*
+	 * Enqueue one packet into the shared data ring and kick the peer
+	 * with a CH0 event interrupt. Returns 0 on success, -1 on error.
+	 */
+	enum ipc_evt_list evtq;
+	int ret;
+
+	if (length > PACKET_SIZE_MAX) {
+		CSP_PRINTF_INFO("%s: invalid size:%d\n",
+				__func__, length);
+		return -1;
+	}
+
+	ret = ipc_io_data(dir, tx, length);
+	if (ret) {
+		CSP_PRINTF_INFO("%s: error\n", __func__);
+		return ret;
+	}
+
+	evtq = (dir == IPC_DATA_C2A) ? IPC_EVT_C2A : IPC_EVT_A2C;
+	return ipc_add_evt(evtq, IRQ_EVT_CH0);
+}
+
+int ipc_read_data(enum ipc_data_list dir, uint8_t *rx)
+{
+	/* Pull the next packet out of the shared data ring into rx.
+	 * Returns the packet size, or 0 when the queue is empty/broken.
+	 */
+	int nread = ipc_io_data(dir, rx, 0);
+
+	if (nread <= 0) {
+		CSP_PRINTF_INFO("%s: error\n", __func__);
+		return 0;
+	}
+
+	return nread;
+}
+
+void ipc_print_databuf(void)
+{
+	/* Dump the bookkeeping of both data rings for debugging. */
+	struct ipc_buf *a2c = ipc_get_base(IPC_REG_IPC_A2C);
+	struct ipc_buf *c2a = ipc_get_base(IPC_REG_IPC_C2A);
+
+	CSP_PRINTF_INFO("a2c: eq:%d dq:%d full:%d empty:%d tx:%d rx:%d\n",
+			a2c->eq, a2c->dq, a2c->full, a2c->empty,
+			a2c->cnt_dbg_wt, a2c->cnt_dbg_rd);
+
+	CSP_PRINTF_INFO("c2a: eq:%d dq:%d full:%d empty:%d tx:%d rx:%d\n",
+			c2a->eq, c2a->dq, c2a->full, c2a->empty,
+			c2a->cnt_dbg_wt, c2a->cnt_dbg_rd);
+}
+
+#else
+/* ipc channel functions */
+/* status-code pretty-printers used in the channel debug trace below */
+#define GET_IPC_REG_NAME(c) (((c) == CS_WRITE) ? "W" : (((c) == CS_RECV) ? "R" : "I"))
+#define GET_CH_NAME(c) (((c) == CS_AP) ? "A" : "C")
+#define GET_CH_OWNER(o) (((o) == IPC_DATA_C2A) ? "C2A" : "A2C")
+
+/*
+ * Advance a channel slot to its next state with a raw register write so
+ * the transition is visible to the peer processor.
+ */
+inline void ipc_update_channel_status(struct ipc_content *content,
+ enum channel_status next)
+{
+#ifdef PACKET_LOW_DEBUG
+ unsigned int org = __raw_readl(&content->status);
+
+ CSP_PRINTF_INFO("CH(%s)%d: %s->%s\n", GET_CH_NAME(org >> CS_OWN_OFFSET),
+ content->num, GET_IPC_REG_NAME((org & CS_IPC_REG_CMP)),
+ GET_IPC_REG_NAME((next & CS_IPC_REG_CMP)));
+#endif
+
+ __raw_writel(next, &content->status);
+}
+
+void *ipc_scan_channel(enum ipc_region area, enum channel_status target)
+{
+	/* First slot currently in 'target' state, or NULL if none. */
+	struct ipc_content *slot = ipc_get_base(area);
+	int idx;
+
+	for (idx = 0; idx < IPC_BUF_NUM; idx++, slot++) {
+		if (__raw_readl(&slot->status) == target)
+			return slot;
+	}
+
+	return NULL;
+}
+
+void *ipc_get_channel(enum ipc_region area, enum channel_status target,
+		      enum channel_status next)
+{
+	/*
+	 * Claim the first slot in 'target' state: advance it to 'next'
+	 * and hand it back, or return NULL when no slot is available.
+	 */
+	struct ipc_content *slot = ipc_get_base(area);
+	int idx;
+
+	for (idx = 0; idx < IPC_BUF_NUM; idx++, slot++) {
+		if (__raw_readl(&slot->status) != target)
+			continue;
+		ipc_update_channel_status(slot, next);
+		return slot;
+	}
+
+	return NULL;
+}
+
+void ipc_print_channel(void)
+{
+	/* List every channel slot that is not idle, per direction. */
+	int slot, dir, status;
+
+	for (dir = 0; dir < IPC_DATA_MAX; dir++) {
+		for (slot = 0; slot < IPC_BUF_NUM; slot++) {
+			status = ipc_map->data[dir][slot].status;
+			if (status & CS_IPC_REG_CMP)
+				CSP_PRINTF_INFO("CH-%s:%x\n",
+						GET_CH_OWNER(dir), status);
+		}
+	}
+}
+#endif
+
+void ipc_init(void)
+{
+	/*
+	 * Reset all shared IPC state (data queues, event queues) and clear
+	 * pending mailbox interrupts. Must run after ipc_get_chub_map()
+	 * has set up ipc_map; if it has not, bail out instead of
+	 * dereferencing a NULL ipc_map as the old code did.
+	 */
+	int i, j;
+
+	if (!ipc_map) {
+		CSP_PRINTF_ERROR("%s: ipc_map is NULL.\n", __func__);
+		return;
+	}
+
+#ifdef USE_IPC_BUF
+	/* ring-buffer mode: mark both directions empty */
+	for (i = 0; i < IPC_DATA_MAX; i++) {
+		ipc_map->data[i].eq = 0;
+		ipc_map->data[i].dq = 0;
+		ipc_map->data[i].full = 0;
+		ipc_map->data[i].empty = 1;
+		ipc_map->data[i].cnt_dbg_wt = 0;
+		ipc_map->data[i].cnt_dbg_rd = 0;
+	}
+#else
+	/* channel mode: return every slot to its owner's idle state */
+	for (i = 0; i < IPC_BUF_NUM; i++) {
+		ipc_map->data[IPC_DATA_C2A][i].num = i;
+		ipc_map->data[IPC_DATA_C2A][i].status = CS_CHUB_OWN;
+		ipc_map->data[IPC_DATA_A2C][i].num = i;
+		ipc_map->data[IPC_DATA_A2C][i].status = CS_AP_OWN;
+	}
+#endif
+
+	ipc_hw_clear_all_int_pend_reg(AP);
+
+	/* drain and invalidate both event queues */
+	for (j = 0; j < IPC_EVT_MAX; j++) {
+		ipc_map->evt[j].ctrl.dq = 0;
+		ipc_map->evt[j].ctrl.eq = 0;
+		ipc_map->evt[j].ctrl.full = 0;
+		ipc_map->evt[j].ctrl.empty = 0;
+		ipc_map->evt[j].ctrl.irq = 0;
+
+		for (i = 0; i < IPC_EVT_NUM; i++) {
+			ipc_map->evt[j].data[i].evt = IRQ_EVT_INVAL;
+			ipc_map->evt[j].data[i].irq = IRQ_EVT_INVAL;
+		}
+	}
+}
+
+/* evt functions */
+/* per-slot lifecycle markers stored in ipc_evt_buf.status */
+enum {
+ IPC_EVT_DQ, /* empty */
+ IPC_EVT_EQ, /* fill */
+};
+
+/* wrap helpers for the event queue index and the rotating irq numbers */
+#define EVT_Q_INT(i) (((i) == IPC_EVT_NUM) ? 0 : (i))
+#define IRQ_EVT_IDX_INT(i) (((i) == IRQ_EVT_END) ? IRQ_EVT_START : (i))
+#define IRQ_C2A_WT_IDX_INT(i) (((i) == IRQ_C2A_END) ? IRQ_C2A_START : (i))
+
+#define EVT_Q_DEC(i) (((i) == -1) ? IPC_EVT_NUM - 1 : (i - 1))
+
+/*
+ * Dequeue the next pending event from the given queue, or NULL when the
+ * queue is empty. The full flag disambiguates eq == dq (completely full
+ * vs completely empty).
+ */
+struct ipc_evt_buf *ipc_get_evt(enum ipc_evt_list evtq)
+{
+ struct ipc_evt *ipc_evt = &ipc_map->evt[evtq];
+ struct ipc_evt_buf *cur_evt = NULL;
+
+ /* normal case: consumer index trails the producer index */
+ if (ipc_evt->ctrl.dq != __raw_readl(&ipc_evt->ctrl.eq)) {
+ cur_evt = &ipc_evt->data[ipc_evt->ctrl.dq];
+ cur_evt->status = IPC_EVT_DQ;
+ ipc_evt->ctrl.dq = EVT_Q_INT(ipc_evt->ctrl.dq + 1);
+ } else if (__raw_readl(&ipc_evt->ctrl.full)) {
+ /* eq == dq with 'full' set: queue wrapped; consume one slot
+ * and clear the flag so the producer can enqueue again
+ */
+ cur_evt = &ipc_evt->data[ipc_evt->ctrl.dq];
+ cur_evt->status = IPC_EVT_DQ;
+ ipc_evt->ctrl.dq = EVT_Q_INT(ipc_evt->ctrl.dq + 1);
+ __raw_writel(0, &ipc_evt->ctrl.full);
+ }
+
+ return cur_evt;
+}
+
+#define EVT_WAIT_TIME (10)
+#define MAX_TRY_CNT (5)
+
+int ipc_add_evt(enum ipc_evt_list evtq, enum irq_evt_chub evt)
+{
+	/*
+	 * Enqueue an event for the peer and raise the matching mailbox
+	 * interrupt. Returns 0 on success, -1 on error.
+	 *
+	 * Fix vs the original: when the queue was full, the CHUB-side wait
+	 * loop tested the full flag inverted and returned -1 on BOTH
+	 * branches, so an event was dropped even after the consumer had
+	 * drained the queue. Now the wait is done up front and, if space
+	 * frees up, the enqueue proceeds normally.
+	 */
+	struct ipc_evt *ipc_evt = &ipc_map->evt[evtq];
+	enum ipc_owner owner = (evtq < IPC_EVT_AP_MAX) ? AP : IPC_OWN_MAX;
+	struct ipc_evt_buf *cur_evt;
+
+	if (!ipc_evt) {
+		CSP_PRINTF_ERROR("%s: invalid ipc_evt\n", __func__);
+		return -1;
+	}
+
+	if (__raw_readl(&ipc_evt->ctrl.full)) {
+#if defined(CHUB_IPC)
+		/* queue full: poll for the consumer to drain an entry */
+		int trycnt = 0;
+
+		do {
+			trycnt++;
+			msleep(EVT_WAIT_TIME);
+		} while (__raw_readl(&ipc_evt->ctrl.full) &&
+			 (trycnt < MAX_TRY_CNT));
+
+		if (__raw_readl(&ipc_evt->ctrl.full)) {
+			CSP_PRINTF_INFO("%s: evt %d during %d ms is full\n",
+					__func__, evt, EVT_WAIT_TIME * trycnt);
+			return -1;
+		}
+#else
+		CSP_PRINTF_ERROR("%s: fail to add evt\n", __func__);
+		return -1;
+#endif
+	}
+
+	cur_evt = &ipc_evt->data[ipc_evt->ctrl.eq];
+	cur_evt->evt = evt;
+	cur_evt->status = IPC_EVT_EQ;
+	cur_evt->irq = ipc_evt->ctrl.irq;
+
+	ipc_evt->ctrl.eq = EVT_Q_INT(ipc_evt->ctrl.eq + 1);
+	ipc_evt->ctrl.irq = IRQ_EVT_IDX_INT(ipc_evt->ctrl.irq + 1);
+
+	/* producer caught up with consumer: mark queue completely full */
+	if (ipc_evt->ctrl.eq == __raw_readl(&ipc_evt->ctrl.dq))
+		__raw_writel(1, &ipc_evt->ctrl.full);
+
+	if (owner != IPC_OWN_MAX) {
+#if defined(AP_IPC)
+		/* timestamp the kick for the peer's debug bookkeeping */
+		ipc_write_val(AP, sched_clock());
+#endif
+		ipc_hw_gen_interrupt(owner, cur_evt->irq);
+	}
+
+	return 0;
+}
+
+#define IPC_GET_EVT_NAME(a) (((a) == IPC_EVT_A2C) ? "A2C" : "C2A")
+
+void ipc_print_evt(enum ipc_evt_list evtq)
+{
+	/* Dump the control words and every slot of one event queue. */
+	const struct ipc_evt *evt = &ipc_map->evt[evtq];
+	int idx;
+
+	CSP_PRINTF_INFO("evt-%s: eq:%d dq:%d full:%d irq:%d\n",
+			IPC_GET_EVT_NAME(evtq), evt->ctrl.eq,
+			evt->ctrl.dq, evt->ctrl.full,
+			evt->ctrl.irq);
+
+	for (idx = 0; idx < IPC_EVT_NUM; idx++)
+		CSP_PRINTF_INFO("evt%d(evt:%d,irq:%d,f:%d)\n",
+				idx, evt->data[idx].evt,
+				evt->data[idx].irq, evt->data[idx].status);
+}
+
+/*
+ * Bump and return the shared log token.
+ * NOTE(review): this read-modify-write is not atomic across the two
+ * processors — confirm only one side ever calls it.
+ */
+u32 ipc_logbuf_get_token(void)
+{
+ __raw_writel(ipc_map->logbuf.token + 1, &ipc_map->logbuf.token);
+
+ return __raw_readl(&ipc_map->logbuf.token);
+}
+
+/*
+ * Append one character to the shared log ring buffer, wrapping at
+ * logbuf.size. When the writer catches up with the reader an (optional)
+ * IPC_DEBUG_CHUB_FULL_LOG event is raised; the character is still
+ * written, so the oldest unread data is overwritten on overrun.
+ */
+void ipc_logbuf_put_with_char(char ch)
+{
+ char *logbuf;
+ int eqNext;
+
+ if (ipc_map) {
+ eqNext = ipc_map->logbuf.eq + 1;
+
+#ifdef IPC_DEBUG
+ /* writer about to collide with reader: tell the AP to flush */
+ if (eqNext == ipc_map->logbuf.dq) {
+ ipc_write_debug_event(AP, IPC_DEBUG_CHUB_FULL_LOG);
+ ipc_add_evt(IPC_EVT_C2A, IRQ_EVT_CHUB_TO_AP_DEBUG);
+ }
+#endif
+
+ logbuf = ipc_map->logbuf.buf;
+
+ *(logbuf + ipc_map->logbuf.eq) = ch;
+
+ /* advance with wrap-around at the end of the ring */
+ if (eqNext == ipc_map->logbuf.size)
+ ipc_map->logbuf.eq = 0;
+ else
+ ipc_map->logbuf.eq = eqNext;
+ }
+}
+
+void ipc_set_owner(enum ipc_owner owner, void *base, enum ipc_direction dir)
+{
+	/* Bind a mailbox MMIO base and transfer direction to an owner. */
+	struct ipc_owner_ctrl *ctrl = &ipc_own[owner];
+
+	ctrl->src = dir;
+	ctrl->base = base;
+}
+
+int ipc_hw_read_int_start_index(enum ipc_owner owner)
+{
+	/* SRC side owns the upper interrupt bank, DST side the lower. */
+	return ipc_own[owner].src ? IRQ_EVT_CHUB_MAX : 0;
+}
+
+unsigned int ipc_hw_read_gen_int_status_reg(enum ipc_owner owner, int irq)
+{
+	/* Test one pending bit in the owner's interrupt status bank. */
+	char *base = ipc_own[owner].base;
+
+	if (ipc_own[owner].src)
+		return __raw_readl(base + REG_MAILBOX_INTSR1) & (1 << irq);
+
+	return __raw_readl(base + REG_MAILBOX_INTSR0) &
+	       (1 << (irq + IRQ_EVT_CHUB_MAX));
+}
+
+void ipc_hw_write_shared_reg(enum ipc_owner owner, unsigned int val, int num)
+{
+	/* Write mailbox shared register ISSR[num]. */
+	char *reg = (char *)ipc_own[owner].base + REG_MAILBOX_ISSR0 + num * 4;
+
+	__raw_writel(val, reg);
+}
+
+unsigned int ipc_hw_read_shared_reg(enum ipc_owner owner, int num)
+{
+	/* Read mailbox shared register ISSR[num]. */
+	char *reg = (char *)ipc_own[owner].base + REG_MAILBOX_ISSR0 + num * 4;
+
+	return __raw_readl(reg);
+}
+
+unsigned int ipc_hw_read_int_status_reg(enum ipc_owner owner)
+{
+	/* Full interrupt status word of this owner's incoming bank. */
+	u32 off = ipc_own[owner].src ? REG_MAILBOX_INTSR0 : REG_MAILBOX_INTSR1;
+
+	return __raw_readl((char *)ipc_own[owner].base + off);
+}
+
+unsigned int ipc_hw_read_int_gen_reg(enum ipc_owner owner)
+{
+	/* Full interrupt generation word of this owner's incoming bank. */
+	u32 off = ipc_own[owner].src ? REG_MAILBOX_INTGR0 : REG_MAILBOX_INTGR1;
+
+	return __raw_readl((char *)ipc_own[owner].base + off);
+}
+
+void ipc_hw_clear_int_pend_reg(enum ipc_owner owner, int irq)
+{
+	/* Acknowledge one pending bit in the owner's clear register. */
+	u32 off = ipc_own[owner].src ? REG_MAILBOX_INTCR0 : REG_MAILBOX_INTCR1;
+
+	__raw_writel(1 << irq, (char *)ipc_own[owner].base + off);
+}
+
+void ipc_hw_clear_all_int_pend_reg(enum ipc_owner owner)
+{
+	/*
+	 * Ack all 16 pending bits in this side's interrupt bank.
+	 * Fixed: the start index was computed from the hard-coded AP
+	 * owner instead of the 'owner' parameter — identical for AP but
+	 * wrong for any CHUB-side owner (APM/CP/GNSS).
+	 * (original hack note: org u32 val = 0xff;)
+	 */
+	u32 val = 0xffff << ipc_hw_read_int_start_index(owner);
+
+	if (ipc_own[owner].src)
+		__raw_writel(val, (char *)ipc_own[owner].base + REG_MAILBOX_INTCR0);
+	else
+		__raw_writel(val, (char *)ipc_own[owner].base + REG_MAILBOX_INTCR1);
+}
+
+void ipc_hw_gen_interrupt(enum ipc_owner owner, int irq)
+{
+	/* Raise an interrupt toward the peer on the opposite bank. */
+	char *base = ipc_own[owner].base;
+
+	if (ipc_own[owner].src)
+		__raw_writel(1 << irq, base + REG_MAILBOX_INTGR1);
+	else
+		__raw_writel(1 << (irq + IRQ_EVT_CHUB_MAX),
+			     base + REG_MAILBOX_INTGR0);
+}
+
+void ipc_hw_set_mcuctrl(enum ipc_owner owner, unsigned int val)
+{
+	/* Program the mailbox MCUCTL register for this owner. */
+	char *reg = (char *)ipc_own[owner].base + REG_MAILBOX_MCUCTL;
+
+	__raw_writel(val, reg);
+}
+
+/*
+ * Set the mask bit for one irq in the owner's mask register.
+ * NOTE(review): unlike the other accessors, the SRC path here uses
+ * bank 0 (INTMR0) with the shifted index and the DST path bank 1 —
+ * the opposite pairing of ipc_hw_gen_interrupt(). Verify against the
+ * mailbox manual that the mask banks really are crossed this way.
+ */
+void ipc_hw_mask_irq(enum ipc_owner owner, int irq)
+{
+ int mask;
+
+ if (ipc_own[owner].src) {
+ mask = __raw_readl((char *)ipc_own[owner].base + REG_MAILBOX_INTMR0);
+ __raw_writel(mask | (1 << (irq + IRQ_EVT_CHUB_MAX)),
+ (char *)ipc_own[owner].base + REG_MAILBOX_INTMR0);
+ } else {
+ mask = __raw_readl((char *)ipc_own[owner].base + REG_MAILBOX_INTMR1);
+ __raw_writel(mask | (1 << irq),
+ (char *)ipc_own[owner].base + REG_MAILBOX_INTMR1);
+ }
+}
+
+/*
+ * Clear the mask bit for one irq in the owner's mask register; exact
+ * mirror of ipc_hw_mask_irq() (same bank pairing — see the note there).
+ */
+void ipc_hw_unmask_irq(enum ipc_owner owner, int irq)
+{
+ int mask;
+
+ if (ipc_own[owner].src) {
+ mask = __raw_readl((char *)ipc_own[owner].base + REG_MAILBOX_INTMR0);
+ __raw_writel(mask & ~(1 << (irq + IRQ_EVT_CHUB_MAX)),
+ (char *)ipc_own[owner].base + REG_MAILBOX_INTMR0);
+ } else {
+ mask = __raw_readl((char *)ipc_own[owner].base + REG_MAILBOX_INTMR1);
+ __raw_writel(mask & ~(1 << irq),
+ (char *)ipc_own[owner].base + REG_MAILBOX_INTMR1);
+ }
+}
+
+void ipc_write_debug_event(enum ipc_owner owner, enum ipc_debug_event action)
+{
+	/* Debug requests travel via shared register SR_DEBUG_ACTION. */
+	ipc_hw_write_shared_reg(owner, (unsigned int)action, SR_DEBUG_ACTION);
+}
+
+u32 ipc_read_debug_event(enum ipc_owner owner)
+{
+	/* Counterpart of ipc_write_debug_event(). */
+	u32 action = ipc_hw_read_shared_reg(owner, SR_DEBUG_ACTION);
+
+	return action;
+}
+
+void ipc_write_val(enum ipc_owner owner, u64 val)
+{
+	/* Split the 64-bit value across the two 32-bit shared registers. */
+	ipc_hw_write_shared_reg(owner, (u32)val, SR_DEBUG_VAL_LOW);
+	ipc_hw_write_shared_reg(owner, (u32)(val >> 32), SR_DEBUG_VAL_HIGH);
+}
+
+u64 ipc_read_val(enum ipc_owner owner)
+{
+	/* Recombine the 64-bit value from the two 32-bit shared regs. */
+	u64 lo = ipc_hw_read_shared_reg(owner, SR_DEBUG_VAL_LOW);
+	u64 hi = ipc_hw_read_shared_reg(owner, SR_DEBUG_VAL_HIGH);
+
+	return (hi << 32) | lo;
+}
--- /dev/null
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Boojin Kim <boojin.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MAILBOX_CHUB_IPC_H
+#define _MAILBOX_CHUB_IPC_H
+
+/* Side selection: SEOS/EMBOS builds are the CHUB firmware side of the
+ * mailbox, anything else is the AP (Linux kernel) side.
+ */
+#if defined(SEOS) || defined(EMBOS)
+#define CHUB_IPC
+#else
+#define AP_IPC
+#endif
+
+/* Both sides must agree on this layout version (checked in
+ * ipc_get_chub_map()); the ring-buffer layout bumped the number.
+ */
+#define USE_IPC_BUF
+#ifdef USE_IPC_BUF
+#define IPC_VERSION (180611)
+#else
+#define IPC_VERSION (180111)
+#endif
+
+#if defined(CHUB_IPC)
+#if defined(SEOS)
+#include <nanohubPacket.h>
+#define PACKET_SIZE_MAX (NANOHUB_PACKET_SIZE_MAX)
+#elif defined(EMBOS)
+/* TODO: Add embos */
+#define SUPPORT_LOOPBACKTEST
+#endif
+#include <csp_common.h>
+#elif defined(AP_IPC)
+#if defined(CONFIG_NANOHUB)
+#include "comms.h"
+#define PACKET_SIZE_MAX (NANOHUB_PACKET_SIZE_MAX)
+#elif defined(CONFIG_CONTEXTHUB_DRV)
+// TODO: Add packet size.. #define PACKET_SIZE_MAX ()
+#endif
+#endif
+
+/* fallback when neither nanohub nor the contexthub driver defined it */
+#ifndef PACKET_SIZE_MAX
+#define PACKET_SIZE_MAX (270)
+#endif
+
+#ifdef LOWLEVEL_DEBUG
+#define DEBUG_LEVEL (0)
+#else
+#if defined(CHUB_IPC)
+#define DEBUG_LEVEL (LOG_ERROR)
+#elif defined(AP_IPC)
+#define DEBUG_LEVEL (KERN_ERR)
+#endif
+#endif
+
+/* On the AP side, route CSP_PRINTF_* to printk (or the low-level
+ * logger); the CHUB side gets these from csp_printf.h.
+ */
+#ifndef CSP_PRINTF_INFO
+#ifdef AP_IPC
+#ifdef LOWLEVEL_DEBUG
+#define CSP_PRINTF_INFO(fmt, ...) log_printf(fmt, ##__VA_ARGS__)
+#define CSP_PRINTF_ERROR(fmt, ...) log_printf(fmt, ##__VA_ARGS__)
+#else
+#define CSP_PRINTF_INFO(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
+#define CSP_PRINTF_ERROR(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
+#endif
+#endif
+#endif
+
+#ifdef LOWLEVEL_DEBUG
+#define DEBUG_PRINT(lv, fmt, ...) \
+ ((DEBUG_LEVEL == (0)) ? (CSP_PRINTF_INFO(fmt, ##__VA_ARGS__)) : \
+ ((DEBUG_LEVEL == (lv)) ? (CSP_PRINTF_INFO(fmt, ##__VA_ARGS__)) : (NULL)))
+#else
+#define DEBUG_PRINT(level, fmt, ...)
+#endif
+
+/* contexthub bootargs */
+#define BL_OFFSET (0x0)
+/* bootargs block sits at this offset from the start of CHUB SRAM */
+#define MAP_INFO_OFFSET (256)
+#define OS_UPDT_MAGIC "Nanohub OS"
+
+#define BOOTMODE_COLD (0x77773333)
+#define BOOTMODE_PWRGATING (0x11118888)
+
+/* Memory map published by the CHUB bootloader: start/end offsets of
+ * every region, relative to the SRAM base (see ipc_get_chub_map()).
+ */
+struct chub_bootargs {
+ char magic[16];
+ u32 ipc_version;
+ u32 bl_start;
+ u32 bl_end;
+ u32 code_start;
+ u32 code_end;
+ u32 ipc_start;
+ u32 ipc_end;
+ u32 ram_start;
+ u32 ram_end;
+ u32 shared_start;
+ u32 shared_end;
+ u32 dump_start;
+ u32 dump_end;
+ u32 chubclk;
+ u32 bootmode;
+#if defined(LOCAL_POWERGATE)
+ /* saved process/main stack pointers across power gating */
+ u32 psp;
+ u32 msp;
+#endif
+};
+
+/* ipc map
+ * data channel: AP -> CHUB
+ * data channel: CHUB -> AP
+ * event channel: AP -> CHUB / ctrl
+ * event channel: CHUB -> AP / ctrl
+ * logbuf / logbuf_ctrl
+ */
+#define IPC_BUF_NUM (IRQ_EVT_CH_MAX)
+#define IPC_EVT_NUM (15)
+#define IPC_LOGBUF_NUM (256)
+
+/* mailbox shared-register indices (4 data words per mailbox pair) */
+enum sr_num {
+ SR_0 = 0,
+ SR_1 = 1,
+ SR_2 = 2,
+ SR_3 = 3,
+};
+
+/* role assignments for the shared registers, per protocol phase */
+#define SR_A2C_ADDR SR_0
+#define SR_A2C_SIZE SR_1
+#define SR_C2A_ADDR SR_2
+#define SR_C2A_SIZE SR_3
+#define SR_DEBUG_ACTION SR_0
+#define SR_DEBUG_VAL_LOW SR_1
+#define SR_DEBUG_VAL_HIGH SR_2
+#define SR_CHUB_ALIVE SR_3
+#define SR_BOOT_MODE SR_0
+
+enum irq_chub {
+ IRQ_C2A_START,
+ IRQ_C2A_END = 2,
+ IRQ_EVT_START,
+ IRQ_EVT_END = 15,
+ IRQ_CHUB_ALIVE = 15,
+ IRQ_INVAL = 0xff,
+};
+
+/* event numbers carried over the 16 mailbox interrupt bits */
+enum irq_evt_chub {
+ IRQ_EVT_CH0, /* data channel */
+ IRQ_EVT_CH1,
+ IRQ_EVT_CH2,
+ IRQ_EVT_CH_MAX,
+ IRQ_EVT_A2C_RESET = IRQ_EVT_CH_MAX,
+ IRQ_EVT_A2C_WAKEUP,
+ IRQ_EVT_A2C_WAKEUP_CLR,
+ IRQ_EVT_A2C_SHUTDOWN,
+ IRQ_EVT_A2C_LOG,
+ IRQ_EVT_A2C_DEBUG,
+ IRQ_EVT_C2A_DEBUG = IRQ_EVT_CH_MAX,
+ IRQ_EVT_C2A_ASSERT,
+ IRQ_EVT_C2A_INT,
+ IRQ_EVT_C2A_INTCLR,
+ IRQ_EVT_CHUB_EVT_MAX = 15,
+ IRQ_EVT_CHUB_ALIVE = IRQ_EVT_CHUB_EVT_MAX,
+ IRQ_EVT_CHUB_MAX = 16, /* max irq number on mailbox */
+ IRQ_EVT_INVAL = 0xff,
+};
+
+/* debug requests exchanged through SR_DEBUG_ACTION: UTC_* entries are
+ * AP-driven unit tests, the CHUB_* entries report firmware state
+ */
+enum ipc_debug_event {
+ IPC_DEBUG_UTC_STOP,
+ IPC_DEBUG_UTC_AGING,
+ IPC_DEBUG_UTC_WDT,
+ IPC_DEBUG_UTC_RTC,
+ IPC_DEBUG_UTC_MEM,
+ IPC_DEBUG_UTC_TIMER,
+ IPC_DEBUG_UTC_GPIO,
+ IPC_DEBUG_UTC_SPI,
+ IPC_DEBUG_UTC_CMU,
+ IPC_DEBUG_UTC_TIME_SYNC,
+ IPC_DEBUG_UTC_ASSERT,
+ IPC_DEBUG_UTC_FAULT,
+ IPC_DEBUG_UTC_CHECK_STATUS,
+ IPC_DEBUG_UTC_CHECK_CPU_UTIL,
+ IPC_DEBUG_UTC_HEAP_DEBUG,
+ IPC_DEBUG_UTC_IPC_TEST_START,
+ IPC_DEBUG_UTC_IPC_TEST_END,
+ IPC_DEBUG_UTC_MAX,
+ IPC_DEBUG_NANOHUB_CHUB_ALIVE = IPC_DEBUG_UTC_MAX,
+ IPC_DEBUG_NANOHUB_MAX,
+ IPC_DEBUG_DUMP_STATUS,
+ IPC_DEBUG_FLUSH_LOG,
+ IPC_DEBUG_CHUB_PRINT_LOG,
+ IPC_DEBUG_CHUB_FULL_LOG,
+ IPC_DEBUG_CHUB_FAULT,
+ IPC_DEBUG_CHUB_ASSERT,
+ IPC_DEBUG_CHUB_ERROR,
+};
+
+/* logical regions addressed through ipc_addr[] (see ipc_get_chub_map) */
+enum ipc_region {
+ IPC_REG_BL,
+ IPC_REG_BL_MAP,
+ IPC_REG_OS,
+ IPC_REG_IPC,
+ IPC_REG_IPC_EVT_A2C,
+ IPC_REG_IPC_EVT_A2C_CTRL,
+ IPC_REG_IPC_EVT_C2A,
+ IPC_REG_IPC_EVT_C2A_CTRL,
+ IPC_REG_IPC_A2C,
+ IPC_REG_IPC_C2A,
+ IPC_REG_SHARED,
+ IPC_REG_RAM,
+ IPC_REG_LOG,
+ IPC_REG_DUMP,
+ IPC_REG_MAX,
+};
+
+/* base address plus size of one region */
+struct ipc_area {
+ void *base;
+ u32 offset;
+};
+
+/* mailbox peers; the firmware side talks to more than just the AP */
+enum ipc_owner {
+ AP,
+#if defined(CHUB_IPC)
+ APM,
+ CP,
+ GNSS,
+#endif
+ IPC_OWN_MAX
+};
+
+enum ipc_data_list {
+ IPC_DATA_C2A,
+ IPC_DATA_A2C,
+ IPC_DATA_MAX,
+};
+
+enum ipc_evt_list {
+ IPC_EVT_C2A,
+ IPC_EVT_A2C,
+ IPC_EVT_AP_MAX,
+ IPC_EVT_MAX = IPC_EVT_AP_MAX
+};
+
+/* handshake bytes used during boot-time alive negotiation */
+enum ipc_packet {
+ IPC_ALIVE_HELLO = 0xab,
+ IPC_ALIVE_OK = 0xcd,
+};
+
+enum ipc_direction {
+ IPC_DST,
+ IPC_SRC,
+};
+
+/* channel status define
+ * IDLE_A2C: 100
+ * AP_WRITE : 110
+ * CHUB_RECV: 101
+ * IDLE_C2A: 000
+ * CHUB_WRITE: 010
+ * AP_RECV: 001
+ */
+/* bit 3 encodes the owner, bits 0-1 the write/recv activity */
+#define CS_OWN_OFFSET (3)
+#define CS_AP (0x1)
+#define CS_CHUB (0x0)
+#define CS_AP_OWN (CS_AP << CS_OWN_OFFSET)
+#define CS_CHUB_OWN (CS_CHUB << CS_OWN_OFFSET)
+#define CS_WRITE (0x2)
+#define CS_RECV (0x1)
+#define CS_IPC_REG_CMP (0x3)
+
+enum channel_status {
+#ifdef AP_IPC
+ CS_IDLE = CS_AP_OWN,
+#else
+ CS_IDLE = CS_CHUB_OWN,
+#endif
+ CS_AP_WRITE = CS_AP_OWN | CS_WRITE,
+ CS_CHUB_RECV = CS_AP_OWN | CS_RECV,
+ CS_CHUB_WRITE = CS_CHUB_OWN | CS_WRITE,
+ CS_AP_RECV = CS_CHUB_OWN | CS_RECV,
+ CS_MAX = 0xf
+};
+
+/* ipc channel structure */
+/* one fixed-size slot of the legacy (non-ring-buffer) data path */
+struct ipc_content {
+ u8 buf[PACKET_SIZE_MAX];
+ u32 num;
+ u32 size;
+ u32 status;
+ u32 pad;
+};
+
+#define INVAL_CHANNEL (-1)
+
+#if defined(AP_IPC) || defined(EMBOS)
+#define HOSTINTF_SENSOR_DATA_MAX 240
+#endif
+
+/* event structure */
+/* producer/consumer bookkeeping for one event queue */
+struct ipc_evt_ctrl {
+ u32 eq;
+ u32 dq;
+ u32 full;
+ u32 empty;
+ u32 irq;
+};
+
+struct ipc_evt_buf {
+ u32 evt;
+ u32 irq;
+ u32 status;
+};
+
+struct ipc_evt {
+ struct ipc_evt_buf data[IPC_EVT_NUM];
+ struct ipc_evt_ctrl ctrl;
+};
+
+/* it's from struct HostIntfDataBuffer buf */
+struct ipc_log_content {
+ u8 pad0;
+ u8 length;
+ u16 pad1;
+ u8 buffer[sizeof(u64) + HOSTINTF_SENSOR_DATA_MAX - sizeof(u32)];
+};
+
+/* shared log ring; 'size' is set at map time, buf[] is the remainder
+ * of the IPC region (flexible tail)
+ */
+struct ipc_logbuf {
+ u32 token;
+ u32 eq; /* write owner chub */
+ u32 dq; /* read onwer ap */
+ u32 size;
+ char buf[0];
+};
+
+#ifndef IPC_DATA_SIZE
+#define IPC_DATA_SIZE (4096)
+#endif
+
+/* one direction of the ring-buffer data path (USE_IPC_BUF mode);
+ * indices are volatile because the peer processor updates them
+ */
+struct ipc_buf {
+ volatile u32 eq;
+ volatile u32 dq;
+ volatile u32 full;
+ volatile u32 empty;
+ u32 cnt_dbg_rd; /* for debug */
+ u32 cnt_dbg_wt; /* for debug */
+ u8 buf[IPC_DATA_SIZE];
+};
+
+/* overall layout of the shared IPC region in CHUB SRAM */
+struct ipc_map_area {
+#ifdef USE_IPC_BUF
+ struct ipc_buf data[IPC_DATA_MAX];
+#else
+ struct ipc_content data[IPC_DATA_MAX][IPC_BUF_NUM];
+#endif
+ struct ipc_evt evt[IPC_EVT_MAX];
+ struct ipc_logbuf logbuf;
+};
+
+/* mailbox Registers */
+/* interrupt generate/clear/mask/status pairs: bank 0 and bank 1, one
+ * per transfer direction of the mailbox
+ */
+#define REG_MAILBOX_MCUCTL (0x000)
+#define REG_MAILBOX_INTGR0 (0x008)
+#define REG_MAILBOX_INTCR0 (0x00C)
+#define REG_MAILBOX_INTMR0 (0x010)
+#define REG_MAILBOX_INTSR0 (0x014)
+#define REG_MAILBOX_INTMSR0 (0x018)
+#define REG_MAILBOX_INTGR1 (0x01C)
+#define REG_MAILBOX_INTCR1 (0x020)
+#define REG_MAILBOX_INTMR1 (0x024)
+#define REG_MAILBOX_INTSR1 (0x028)
+#define REG_MAILBOX_INTMSR1 (0x02C)
+
+/* version register offset differs per SoC generation */
+#if defined(AP_IPC)
+#if defined(CONFIG_SOC_EXYNOS9810)
+#define REG_MAILBOX_VERSION (0x050)
+#elif defined(CONFIG_SOC_EXYNOS9610)
+#define REG_MAILBOX_VERSION (0x070)
+#else
+//
+//Need to check !!!
+//
+#define REG_MAILBOX_VERSION (0x0)
+#endif
+#endif
+
+#define REG_MAILBOX_ISSR0 (0x080)
+#define REG_MAILBOX_ISSR1 (0x084)
+#define REG_MAILBOX_ISSR2 (0x088)
+#define REG_MAILBOX_ISSR3 (0x08C)
+
+/* Raw-accessor macros for contexts that only have the mailbox base
+ * address (no ipc_own[] entry).
+ */
+#define IPC_HW_READ_STATUS(base) \
+	__raw_readl((base) + REG_MAILBOX_INTSR0)
+#define IPC_HW_READ_STATUS1(base) \
+	__raw_readl((base) + REG_MAILBOX_INTSR1)
+#define IPC_HW_READ_PEND(base, irq) \
+	(__raw_readl((base) + REG_MAILBOX_INTSR1) & (1 << (irq)))
+#define IPC_HW_CLEAR_PEND(base, irq) \
+	__raw_writel(1 << (irq), (base) + REG_MAILBOX_INTCR0)
+#define IPC_HW_CLEAR_PEND1(base, irq) \
+	__raw_writel(1 << (irq), (base) + REG_MAILBOX_INTCR1)
+#define IPC_HW_WRITE_SHARED_REG(base, num, data) \
+	__raw_writel((data), (base) + REG_MAILBOX_ISSR0 + (num) * 4)
+#define IPC_HW_READ_SHARED_REG(base, num) \
+	__raw_readl((base) + REG_MAILBOX_ISSR0 + (num) * 4)
+#define IPC_HW_GEN_INTERRUPT_GR1(base, num) \
+	__raw_writel(1 << (num), (base) + REG_MAILBOX_INTGR1)
+#define IPC_HW_GEN_INTERRUPT_GR0(base, num) \
+	__raw_writel(1 << ((num) + 16), (base) + REG_MAILBOX_INTGR0)
+/* Fixed: __raw_write32() does not exist in the kernel; use
+ * __raw_writel() like every other accessor above so this macro
+ * compiles when first used.
+ */
+#define IPC_HW_SET_MCUCTL(base, val) \
+	__raw_writel((val), (base) + REG_MAILBOX_MCUCTL)
+
+/* channel ctrl functions */
+void ipc_print_channel(void);
+/* NOTE(review): ipc_get_cs_name, ipc_get_logbuf and
+ * ipc_logbuf_need_flush are declared here but not defined in this
+ * file — confirm they exist on the firmware side.
+ */
+char *ipc_get_cs_name(enum channel_status cs);
+void ipc_set_base(void *addr);
+void *ipc_get_base(enum ipc_region area);
+u32 ipc_get_offset(enum ipc_region area);
+void *ipc_get_addr(enum ipc_region area, int buf_num);
+void ipc_init(void);
+int ipc_hw_read_int_start_index(enum ipc_owner owner);
+void ipc_update_channel_status(struct ipc_content *content,
+ enum channel_status next);
+void *ipc_scan_channel(enum ipc_region area, enum channel_status target);
+void *ipc_get_channel(enum ipc_region area, enum channel_status target,
+ enum channel_status next);
+/* logbuf functions */
+void *ipc_get_logbuf(void);
+unsigned int ipc_logbuf_get_token(void);
+/* evt functions */
+struct ipc_evt_buf *ipc_get_evt(enum ipc_evt_list evt);
+int ipc_add_evt(enum ipc_evt_list evt, enum irq_evt_chub irq);
+void ipc_print_evt(enum ipc_evt_list evt);
+/* mailbox hw access */
+void ipc_set_owner(enum ipc_owner owner, void *base, enum ipc_direction dir);
+unsigned int ipc_hw_read_gen_int_status_reg(enum ipc_owner owner, int irq);
+void ipc_hw_write_shared_reg(enum ipc_owner owner, unsigned int val, int num);
+unsigned int ipc_hw_read_shared_reg(enum ipc_owner owner, int num);
+unsigned int ipc_hw_read_int_status_reg(enum ipc_owner owner);
+unsigned int ipc_hw_read_int_gen_reg(enum ipc_owner owner);
+void ipc_hw_clear_int_pend_reg(enum ipc_owner owner, int irq);
+void ipc_hw_clear_all_int_pend_reg(enum ipc_owner owner);
+void ipc_hw_gen_interrupt(enum ipc_owner owner, int irq);
+void ipc_hw_set_mcuctrl(enum ipc_owner owner, unsigned int val);
+void ipc_hw_mask_irq(enum ipc_owner owner, int irq);
+void ipc_hw_unmask_irq(enum ipc_owner owner, int irq);
+void ipc_logbuf_put_with_char(char ch);
+int ipc_logbuf_need_flush(void);
+void ipc_write_debug_event(enum ipc_owner owner, enum ipc_debug_event action);
+u32 ipc_read_debug_event(enum ipc_owner owner);
+void *ipc_get_chub_map(void);
+u32 ipc_get_chub_mem_size(void);
+u64 ipc_read_val(enum ipc_owner owner);
+void ipc_write_val(enum ipc_owner owner, u64 result);
+void ipc_set_chub_clk(u32 clk);
+u32 ipc_get_chub_clk(void);
+void ipc_set_chub_bootmode(u32 bootmode);
+u32 ipc_get_chub_bootmode(void);
+void ipc_dump(void);
+#if defined(LOCAL_POWERGATE)
+u32 *ipc_get_chub_psp(void);
+u32 *ipc_get_chub_msp(void);
+#endif
+
+void ipc_print_databuf(void);
+int ipc_read_data(enum ipc_data_list dir, uint8_t *rx);
+int ipc_write_data(enum ipc_data_list dir, void *tx, u16 length);
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/iommu.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+
+#include "chub_log.h"
+#include "chub_dbg.h"
+#include "chub_ipc.h"
+
+#ifdef CONFIG_CONTEXTHUB_DEBUG
+#define SIZE_OF_BUFFER (SZ_512K + SZ_128K)
+#else
+#define SIZE_OF_BUFFER (SZ_128K)
+#endif
+
+#define S_IRWUG (0660)
+#define DEFAULT_FLUSH_MS (1000)
+
+static u32 log_auto_save;
+static struct dentry *dbg_root_dir __read_mostly;
+static LIST_HEAD(log_list_head);
+static struct log_buffer_info *print_info;
+u32 auto_log_flush_ms;
+
+/*
+ * Copy @size bytes of log data from @src (CHUB SRAM or normal memory)
+ * into the kernel mirror ring, wrapping at SIZE_OF_BUFFER, and append
+ * the same bytes to the auto-save file when log_auto_save is set.
+ * Uses memcpy_fromio() when the source lives in CHUB SRAM.
+ */
+static void log_memcpy(struct log_buffer_info *info,
+		       struct log_kernel_buffer *kernel_buffer,
+		       const char *src, size_t size)
+{
+	size_t left_size = SIZE_OF_BUFFER - kernel_buffer->index;
+	bool saving = false;
+
+	dev_dbg(info->dev, "%s(%zu)\n", __func__, size);
+
+	if (size > SIZE_OF_BUFFER) {
+		dev_warn(info->dev,
+			 "flush size (%zu, %zu) is bigger than kernel buffer size (%d)",
+			 size, left_size, SIZE_OF_BUFFER);
+		size = SIZE_OF_BUFFER;
+	}
+
+	if (log_auto_save) {
+		info->filp = filp_open(info->save_file_name,
+				       O_RDWR | O_APPEND | O_CREAT, S_IRWUG);
+		/* never vfs_write()/filp_close() through an ERR_PTR */
+		if (IS_ERR(info->filp)) {
+			dev_warn(info->dev, "%s: '%s' open fail\n", __func__,
+				 info->save_file_name);
+			info->filp = NULL;
+		} else {
+			saving = true;
+		}
+	}
+
+	/* wrap: fill the ring to its end first, then restart at offset 0 */
+	if (left_size < size) {
+		if (info->sram_log_buffer)
+			memcpy_fromio(kernel_buffer->buffer + kernel_buffer->index, src, left_size);
+		else
+			memcpy(kernel_buffer->buffer + kernel_buffer->index, src, left_size);
+
+		if (saving) {
+			vfs_write(info->filp, kernel_buffer->buffer + kernel_buffer->index, left_size, &info->filp->f_pos);
+			vfs_fsync(info->filp, 0);
+		}
+
+		src += left_size;
+		size -= left_size;
+
+		kernel_buffer->index = 0;
+		kernel_buffer->wrap = true;
+	}
+
+	if (info->sram_log_buffer)
+		memcpy_fromio(kernel_buffer->buffer + kernel_buffer->index, src, size);
+	else
+		memcpy(kernel_buffer->buffer + kernel_buffer->index, src, size);
+
+	if (saving) {
+		vfs_write(info->filp,
+			  kernel_buffer->buffer + kernel_buffer->index, size, &info->filp->f_pos);
+		vfs_fsync(info->filp, 0);
+		filp_close(info->filp, NULL);
+		/* drop the stale pointer so a later disable can't double-close */
+		info->filp = NULL;
+	}
+
+	kernel_buffer->index += size;
+}
+
+/*
+ * Drain the CHUB-side LOG_BUFFER into the kernel mirror ring.
+ * The writer index is snapshotted once up front; when the reader is
+ * ahead of the writer the wrapped tail [reader, end) is copied first,
+ * then the remainder, and the new reader index is published back to
+ * the shared buffer before waking blocked debugfs readers.
+ */
+void log_flush(struct log_buffer_info *info)
+{
+	struct LOG_BUFFER *buffer = info->log_buffer;
+	struct log_kernel_buffer *kernel_buffer = &info->kernel_buffer;
+	/* snapshot: the firmware may keep writing while we copy */
+	unsigned int index_writer = buffer->index_writer;
+
+	/* nothing new since the last flush */
+	if (buffer->index_reader == index_writer)
+		return;
+
+	dev_dbg(info->dev,
+		"%s(%d): index_writer=%u, index_reader=%u, size=%u\n", __func__,
+		info->id, index_writer, buffer->index_reader, buffer->size);
+
+	mutex_lock(&info->lock);
+
+	/* writer wrapped: copy [reader, end of ring) first */
+	if (buffer->index_reader > index_writer) {
+		log_memcpy(info, kernel_buffer,
+			   buffer->buffer + buffer->index_reader,
+			   buffer->size - buffer->index_reader);
+		buffer->index_reader = 0;
+	}
+	log_memcpy(info, kernel_buffer,
+		   buffer->buffer + buffer->index_reader,
+		   index_writer - buffer->index_reader);
+	buffer->index_reader = index_writer;
+
+	/* make the reader-index update visible before readers are woken */
+	wmb();
+	mutex_unlock(&info->lock);
+
+	/* wake any reader blocked in log_file_read() */
+	kernel_buffer->updated = true;
+	wake_up_interruptible(&kernel_buffer->wq);
+}
+
+static void log_flush_all_work_func(struct work_struct *work);
+static DECLARE_DEFERRABLE_WORK(log_flush_all_work, log_flush_all_work_func);
+
+/* Flush every registered log buffer; bail out if the CHUB is stopped. */
+static void log_flush_all(void)
+{
+	struct log_buffer_info *info;
+
+	list_for_each_entry(info, &log_list_head, list) {
+		/* NOTE(review): aborts the whole walk on the first stopped
+		 * device - presumably all entries share one CHUB instance;
+		 * confirm against the registration path */
+		if (info && !contexthub_is_run(dev_get_drvdata(info->dev))) {
+			pr_warn("%s: chub isn't run\n", __func__);
+			return;
+		}
+
+		log_flush(info);
+	}
+}
+
+/* Deferrable work: flush all buffers, then re-arm itself while the
+ * auto-flush period (auto_log_flush_ms) is non-zero. */
+static void log_flush_all_work_func(struct work_struct *work)
+{
+	log_flush_all();
+
+	if (auto_log_flush_ms)
+		schedule_delayed_work(&log_flush_all_work,
+				      msecs_to_jiffies(auto_log_flush_ms));
+}
+
+/* Kick the periodic flush worker after a fixed 3 s start-up delay. */
+void log_schedule_flush_all(void)
+{
+	schedule_delayed_work(&log_flush_all_work, msecs_to_jiffies(3000));
+}
+
+/* debugfs open: stash the buffer info and mark "no read position yet". */
+static int log_file_open(struct inode *inode, struct file *file)
+{
+	struct log_buffer_info *info = inode->i_private;
+
+	dev_dbg(info->dev, "%s\n", __func__);
+
+	file->private_data = inode->i_private;
+	/* -1 => first read starts from the oldest available data */
+	info->log_file_index = -1;
+
+	return 0;
+}
+
+/*
+ * debugfs read: stream the kernel mirror ring to userspace.  Blocks
+ * (unless O_NONBLOCK) until the flusher signals new data through
+ * kernel_buffer->updated / the wait queue.
+ */
+static ssize_t log_file_read(struct file *file, char __user *buf, size_t count,
+			     loff_t *ppos)
+{
+	struct log_buffer_info *info = file->private_data;
+	struct log_kernel_buffer *kernel_buffer = &info->kernel_buffer;
+	size_t end, size;
+	/* first read after open: cursor not yet initialized */
+	bool first = (info->log_file_index < 0);
+	int result;
+
+	dev_dbg(info->dev, "%s(%zu, %lld)\n", __func__, count, *ppos);
+
+	mutex_lock(&info->lock);
+
+	if (info->log_file_index < 0) {
+		/* start at the oldest byte: the current write index once
+		 * the ring has wrapped, otherwise offset 0 */
+		info->log_file_index =
+		    likely(kernel_buffer->wrap) ? kernel_buffer->index : 0;
+	}
+
+	do {
+		/* read up to the writer, or to the end of the ring when the
+		 * cursor is behind the wrap point */
+		end = ((info->log_file_index < kernel_buffer->index) ||
+		       ((info->log_file_index == kernel_buffer->index) &&
+			!first)) ? kernel_buffer->index : SIZE_OF_BUFFER;
+		size = min(end - info->log_file_index, count);
+		if (size == 0) {
+			/* nothing to read: drop the lock and wait */
+			mutex_unlock(&info->lock);
+			if (file->f_flags & O_NONBLOCK) {
+				dev_dbg(info->dev, "non block\n");
+				return -EAGAIN;
+			}
+			kernel_buffer->updated = false;
+
+			result = wait_event_interruptible(kernel_buffer->wq,
+							  kernel_buffer->updated);
+			if (result != 0) {
+				dev_dbg(info->dev, "interrupted\n");
+				return result;
+			}
+			mutex_lock(&info->lock);
+		}
+	} while (size == 0);
+
+	dev_dbg(info->dev, "start=%zd, end=%zd size=%zd\n",
+		info->log_file_index, end, size);
+	if (copy_to_user
+	    (buf, kernel_buffer->buffer + info->log_file_index, size)) {
+		mutex_unlock(&info->lock);
+		return -EFAULT;
+	}
+
+	/* advance and wrap the read cursor */
+	info->log_file_index += size;
+	if (info->log_file_index >= SIZE_OF_BUFFER)
+		info->log_file_index = 0;
+
+	mutex_unlock(&info->lock);
+
+	dev_dbg(info->dev, "%s: size = %zd\n", __func__, size);
+
+	return size;
+}
+
+/* debugfs poll: register on the flusher's wait queue.
+ * NOTE(review): always returns readable, so poll() never actually
+ * blocks here - confirm this is intended. */
+static unsigned int log_file_poll(struct file *file, poll_table *wait)
+{
+	struct log_buffer_info *info = file->private_data;
+	struct log_kernel_buffer *kernel_buffer = &info->kernel_buffer;
+
+	dev_dbg(info->dev, "%s\n", __func__);
+
+	poll_wait(file, &kernel_buffer->wq, wait);
+	return POLLIN | POLLRDNORM;
+}
+
+/* file_operations for the per-buffer debugfs log node */
+static const struct file_operations log_fops = {
+	.open = log_file_open,
+	.read = log_file_read,
+	.poll = log_file_poll,
+	.llseek = generic_file_llseek,
+	.owner = THIS_MODULE,
+};
+
+/* Lazily create and return the "nanohub" debugfs root directory. */
+static struct dentry *chub_dbg_get_root_dir(void)
+{
+	if (!dbg_root_dir)
+		dbg_root_dir = debugfs_create_dir("nanohub", NULL);
+
+	return dbg_root_dir;
+}
+
+/*
+ * (Re)open the auto-save target file named in info->save_file_name.
+ * On failure info->filp is reset to NULL so later vfs_write() /
+ * filp_close() callers never dereference an ERR_PTR.
+ */
+void chub_log_auto_save_open(struct log_buffer_info *info)
+{
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+
+	/* close previous file, if one is actually open */
+	if (info->filp && !IS_ERR(info->filp))
+		filp_close(info->filp, NULL);
+
+	info->filp =
+	    filp_open(info->save_file_name, O_RDWR | O_TRUNC | O_CREAT,
+		      S_IRWUG);
+
+	dev_dbg(info->dev, "created\n");
+
+	if (IS_ERR(info->filp)) {
+		dev_warn(info->dev, "%s: saving log fail\n", __func__);
+		/* don't leave an ERR_PTR behind for other users */
+		info->filp = NULL;
+	}
+
+	set_fs(old_fs);
+}
+
+/*
+ * Toggle log auto-save: on enable pick a timestamped file name under
+ * /data and open it; on disable close the file only if one is actually
+ * open (it may be NULL or an earlier open may have failed).
+ */
+static void chub_log_auto_save_ctrl(struct log_buffer_info *info, u32 event)
+{
+	if (event) {
+		/* set file name */
+		snprintf(info->save_file_name, sizeof(info->save_file_name),
+			 "/data/nano-%02d-00-%06u.log", info->id,
+			 (u32)(sched_clock() / NSEC_PER_SEC));
+
+		chub_log_auto_save_open(info);
+
+		log_auto_save = 1;
+	} else {
+		log_auto_save = 0;
+		/* guard: filp may never have been opened */
+		if (info->filp && !IS_ERR(info->filp))
+			filp_close(info->filp, NULL);
+		info->filp = NULL;
+	}
+	pr_info("%s: %s, %d, %p\n", __func__, info->save_file_name,
+		log_auto_save, info->filp);
+}
+
+/* sysfs "save_log" show: report the current auto-save enable state. */
+static ssize_t chub_log_save_show(struct device *kobj,
+				  struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", log_auto_save);
+}
+
+/*
+ * sysfs "save_log" store: enable/disable auto-save for every buffer
+ * that supports it and make sure the periodic flush worker runs.
+ * Returns @count on success or a negative errno on bad input.
+ */
+static ssize_t chub_log_save_save(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	long event;
+	int err;
+	struct log_buffer_info *info;
+
+	err = kstrtol(&buf[0], 10, &event);
+	if (err)
+		/* propagate the errno: returning 0 from a sysfs store
+		 * makes the VFS retry the write forever */
+		return err;
+
+	list_for_each_entry(info, &log_list_head, list)
+		if (info->support_log_save)
+			chub_log_auto_save_ctrl(info, event);
+
+	/* start the auto-flush worker if it wasn't running yet */
+	if (!auto_log_flush_ms) {
+		log_schedule_flush_all();
+		auto_log_flush_ms = DEFAULT_FLUSH_MS;
+	}
+
+	return count;
+}
+
+#define TMP_BUFFER_SIZE (1000)
+
+#if defined(CONFIG_CONTEXTHUB_DEBUG)
+/*
+ * Dump one whole log buffer to /data/nano-<id>-<err>-<ts>.log.  The dump
+ * starts at the current writer index so the file reads oldest-to-newest
+ * once the ring has wrapped.  SRAM-backed buffers are staged through a
+ * stack buffer because vfs_write() cannot take an iomem pointer.
+ */
+static void log_dump(struct log_buffer_info *info, int err)
+{
+	struct file *filp;
+	mm_segment_t old_fs;
+	char save_file_name[32];
+	struct LOG_BUFFER *buffer = info->log_buffer;
+	u32 index = buffer->index_writer;
+
+	snprintf(save_file_name, sizeof(save_file_name),
+		 "/data/nano-%02d-%02d-%06u.log", info->id, err,
+		 (u32)(sched_clock() / NSEC_PER_SEC));
+
+	/* save the caller's address limit BEFORE widening it, and restore
+	 * it on every exit path (it was previously read after set_fs() and
+	 * never restored) */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	filp = filp_open(save_file_name, O_RDWR | O_TRUNC | O_CREAT, S_IRWUG);
+	if (IS_ERR(filp)) {
+		dev_warn(info->dev, "%s: open '%s' fail\n", __func__,
+			 save_file_name);
+		set_fs(old_fs);
+		return;
+	}
+
+	if (info->sram_log_buffer) {
+		char tmp_buffer[TMP_BUFFER_SIZE];
+		u32 pos = index;	/* oldest byte sits at the writer */
+		u32 remain = buffer->size;
+		u32 size;
+
+		/* copy in TMP_BUFFER_SIZE chunks, wrapping at the end of
+		 * the ring; the old loop skipped the first chunk after the
+		 * wrap because the for-update re-advanced start_index */
+		while (remain) {
+			size = TMP_BUFFER_SIZE;
+			if (pos + size > buffer->size)
+				size = buffer->size - pos;
+			if (size > remain)
+				size = remain;
+
+			memcpy(tmp_buffer, buffer->buffer + pos, size);
+			vfs_write(filp, tmp_buffer, size, &filp->f_pos);
+
+			remain -= size;
+			pos += size;
+			if (pos == buffer->size)
+				pos = 0;
+		}
+	} else {
+		vfs_write(filp, buffer->buffer + index, buffer->size - index,
+			  &filp->f_pos);
+		vfs_write(filp, buffer->buffer, index, &filp->f_pos);
+	}
+
+	dev_info(info->dev, "%s is created\n", save_file_name);
+
+	vfs_fsync(filp, 0);
+	filp_close(filp, NULL);
+	set_fs(old_fs);
+}
+
+/* Dump every registered log buffer to /data with error code @err
+ * embedded in the file name (debug builds only). */
+void log_dump_all(int err)
+{
+	struct log_buffer_info *info;
+
+	list_for_each_entry(info, &log_list_head, list)
+		log_dump(info, err);
+}
+#endif
+
+/* sysfs "flush_log" show: report the current auto-flush period (ms). */
+static ssize_t chub_log_flush_show(struct device *kobj,
+				   struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", auto_log_flush_ms);
+}
+
+/*
+ * sysfs "flush_log" store: set the auto-flush period (input is in
+ * seconds, stored in ms).  When the worker was idle, perform one
+ * immediate flush while holding a contexthub reference.
+ * Returns @count on success or a negative errno on bad input.
+ */
+static ssize_t chub_log_flush_save(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	long event;
+	int err;
+	struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
+
+	err = kstrtol(&buf[0], 10, &event);
+	if (err)
+		/* propagate the errno: returning 0 from a sysfs store
+		 * makes the VFS retry the write forever */
+		return err;
+
+	if (!auto_log_flush_ms) {
+		err = contexthub_request(ipc);
+		if (!err) {
+			log_flush_all();
+			contexthub_release(ipc);
+		} else {
+			pr_err("%s: fails to flush log\n", __func__);
+		}
+	}
+	auto_log_flush_ms = event * 1000;
+
+	return count;
+}
+
+/* sysfs "dump_log" store: any write dumps all buffers with err code 0. */
+static ssize_t chub_dump_log_save(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	log_dump_all(0);
+	return count;
+}
+
+/* sysfs controls; created only for SRAM-backed buffers, see
+ * log_register_buffer() */
+static struct device_attribute attributes[] = {
+	__ATTR(save_log, 0664, chub_log_save_show, chub_log_save_save),
+	__ATTR(flush_log, 0664, chub_log_flush_show, chub_log_flush_save),
+	__ATTR(dump_log, 0220, NULL, chub_dump_log_save)
+};
+
+/*
+ * Register a CHUB log buffer with the log core: allocate the kernel-side
+ * mirror ring, expose the buffer through debugfs and, for SRAM-backed
+ * buffers, create the sysfs save/flush/dump controls.
+ * Returns the new info struct, or NULL on allocation failure.
+ */
+struct log_buffer_info *log_register_buffer(struct device *dev, int id,
+					    struct LOG_BUFFER *buffer,
+					    char *name, bool sram)
+{
+	struct log_buffer_info *info = vmalloc(sizeof(*info));
+	int i;
+	int ret;
+
+	if (!info)
+		return NULL;
+
+	mutex_init(&info->lock);
+	info->id = id;
+	info->file_created = false;
+	info->kernel_buffer.buffer = vzalloc(SIZE_OF_BUFFER);
+	/* the mirror ring is mandatory: fail now instead of oopsing in
+	 * log_memcpy()/log_file_read() later */
+	if (!info->kernel_buffer.buffer) {
+		vfree(info);
+		return NULL;
+	}
+	info->kernel_buffer.index = 0;
+	info->kernel_buffer.index_reader = 0;
+	info->kernel_buffer.index_writer = 0;
+	info->kernel_buffer.wrap = false;
+	init_waitqueue_head(&info->kernel_buffer.wq);
+	info->dev = dev;
+	info->log_buffer = buffer;
+
+	/* HACK: clang make error
+	buffer->index_reader = 0;
+	buffer->index_writer = 0;
+	*/
+	info->save_file_name[0] = '\0';
+	info->filp = NULL;
+
+	dev_info(dev, "%s with %p buffer size %d. %p kernel buffer size %d\n",
+		 __func__, buffer->buffer, buffer->size,
+		 info->kernel_buffer.buffer, SIZE_OF_BUFFER);
+
+	debugfs_create_file(name, S_IRWUG, chub_dbg_get_root_dir(), info,
+			    &log_fops);
+
+	list_add_tail(&info->list, &log_list_head);
+
+	if (sram) {
+		info->sram_log_buffer = true;
+		info->support_log_save = true;
+
+		/* add device files */
+		for (i = 0, ret = 0; i < ARRAY_SIZE(attributes); i++) {
+			ret = device_create_file(dev, &attributes[i]);
+			if (ret)
+				dev_warn(dev, "Failed to create file: %s\n",
+					 attributes[i].attr.name);
+		}
+	} else {
+		/* the non-SRAM buffer doubles as the log_printf() target */
+		print_info = info;
+		info->sram_log_buffer = false;
+		info->support_log_save = false;
+	}
+
+	return info;
+}
+
+/*
+ * printf-style logging into the AP-side LOG_BUFFER (print_info),
+ * wrapping at the end of the ring.  No-op until log_register_buffer()
+ * has registered a non-SRAM buffer.
+ */
+void log_printf(const char *format, ...)
+{
+	struct LOG_BUFFER *buffer;
+	int size;
+	va_list args;
+
+	if (print_info) {
+		char tmp_buf[512];
+		char *buffer_index = tmp_buf;
+
+		buffer = print_info->log_buffer;
+
+		va_start(args, format);
+		/* bounded format: vsprintf could overrun tmp_buf on long
+		 * messages */
+		size = vsnprintf(tmp_buf, sizeof(tmp_buf), format, args);
+		va_end(args);
+
+		if (size < 0)
+			return;
+		/* vsnprintf returns the would-be length; clamp to what
+		 * actually fits */
+		if (size >= (int)sizeof(tmp_buf))
+			size = sizeof(tmp_buf) - 1;
+
+		size++;		/* include the terminating NUL */
+		if (buffer->index_writer + size > buffer->size) {
+			int left_size = buffer->size - buffer->index_writer;
+
+			memcpy(&buffer->buffer[buffer->index_writer],
+			       buffer_index, left_size);
+			buffer->index_writer = 0;
+			buffer_index += left_size;
+		}
+		memcpy(&buffer->buffer[buffer->index_writer], buffer_index,
+		       size - (buffer_index - tmp_buf));
+		buffer->index_writer += size - (buffer_index - tmp_buf);
+
+	}
+}
+
+/* Expose the log_auto_save flag in debugfs once the kernel is up. */
+static int __init log_late_initcall(void)
+{
+	debugfs_create_u32("log_auto_save", S_IRWUG, chub_dbg_get_root_dir(),
+			   &log_auto_save);
+	return 0;
+}
+
+late_initcall(log_late_initcall);
--- /dev/null
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __CHUB_LOG_H_
+#define __CHUB_LOG_H_
+
+#include <linux/device.h>
+
+/* Kernel-side mirror of a CHUB log buffer (ring of SIZE_OF_BUFFER bytes). */
+struct log_kernel_buffer {
+	char *buffer;		/* vzalloc'd ring storage */
+	unsigned int index;	/* next write position in the ring */
+	bool wrap;		/* true once the ring has wrapped */
+	volatile bool updated;	/* set by the flusher, polled by readers */
+	wait_queue_head_t wq;	/* readers blocked in log_file_read() */
+	u32 log_file_index;
+	u32 index_writer;
+	u32 index_reader;
+};
+
+/* Per-buffer bookkeeping: links the shared LOG_BUFFER, its kernel
+ * mirror, and the debugfs / auto-save state. */
+struct log_buffer_info {
+	struct list_head list;	/* entry in log_list_head */
+	struct device *dev;
+	struct file *filp;	/* auto-save target file, NULL when closed */
+	int id;
+	bool file_created;
+	struct mutex lock;	/* logbuf access lock */
+	ssize_t log_file_index;	/* debugfs read cursor, -1 = uninitialized */
+	char save_file_name[32];
+	struct LOG_BUFFER *log_buffer;	/* shared buffer written by firmware */
+	struct log_kernel_buffer kernel_buffer;
+	bool sram_log_buffer;	/* buffer lives in CHUB SRAM (iomem) */
+	bool support_log_save;	/* sysfs save controls are exposed */
+};
+
+/* Layout of the shared (firmware-written) log buffer; log bytes follow
+ * the header in place. */
+struct LOG_BUFFER {
+	u32 index_writer;	/* firmware write position */
+	u32 index_reader;	/* kernel read position */
+	u32 size;		/* capacity of buffer[] */
+	char buffer[0];		/* NOTE(review): [0] predates C99 flexible arrays */
+};
+
+void log_flush(struct log_buffer_info *info);
+void log_schedule_flush_all(void);
+struct log_buffer_info *log_register_buffer(struct device *dev, int id,
+ struct LOG_BUFFER *buffer,
+ char *name, bool sram);
+
+#ifdef CONFIG_CONTEXTHUB_DEBUG
+void log_dump_all(int err);
+#else
+#define log_dump_all(err) do {} while (0)
+#endif
+
+void log_printf(const char *format, ...);
+#endif /* __CHUB_LOG_H_ */
#include "main.h"
#include "comms.h"
+#if defined(CONFIG_NANOHUB_MAILBOX)
+#include "chub.h"
+#endif
+
#define READ_ACK_TIMEOUT_MS 10
#define READ_MSG_TIMEOUT_MS 70
return (struct nanohub_packet_pad *)packet;
}
+#ifdef PACKET_LOW_DEBUG
+enum comms_action {
+ ca_tx,
+ ca_rx_ack,
+ ca_rx,
+};
+
+#define GET_ACT_STRING(act) \
+ ((act) == ca_tx ? 'W' : ((act) == ca_rx_ack ? 'A' : 'R'))
+
+/* This function is from hostIntfGetFooter function on nanohub kernel:
+ * the CRC footer sits immediately after the header and payload. */
+static inline struct nanohub_packet_crc *get_footer(struct nanohub_packet *packet)
+{
+	/* cast to a byte pointer before adding the header size and payload
+	 * length: arithmetic on the struct pointer would be scaled by
+	 * sizeof(*packet) and land past the real footer */
+	return (void *)((uint8_t *)packet + sizeof(*packet) + packet->len);
+}
+
+/* Dump a nanohub packet's header and CRC footer for low-level comms
+ * tracing (tx / rx-ack / rx, see comms_action). */
+static inline void packet_disassemble(void *buf, int ret, enum comms_action act)
+{
+	struct nanohub_packet *packet = (struct nanohub_packet *)buf;
+	struct nanohub_packet_crc *footer = get_footer(packet);
+
+	/* pass the pointer itself for %p: the old (unsigned long) cast was
+	 * a printf format/argument type mismatch */
+	DEBUG_PRINT(KERN_DEBUG,
+		    "%c-PACKET(ret:%d):buf:%p,sync:0x%x,seq:0x%x,reason:0x%x,len:%d,crc:0x%x\n",
+		    GET_ACT_STRING(act), ret, buf,
+		    (unsigned int)packet->sync, (unsigned int)packet->seq,
+		    (unsigned int)packet->reason, (unsigned int)packet->len,
+		    (unsigned int)footer->crc);
+}
+#else
+#define packet_disassemble(a, b, c) do {} while (0)
+#endif
+
static int packet_create(struct nanohub_packet *packet, uint32_t seq,
uint32_t reason, uint8_t len, const uint8_t *data,
bool user)
} else {
ret = ERROR_NACK;
}
+ packet_disassemble(packet, ret, ca_tx);
return ret;
}
ret =
data->comms.read(data, (uint8_t *) response, max_size,
timeout);
+ packet_disassemble(response, ret, ca_rx_ack);
if (ret == 0) {
pr_debug("nanohub: read_ack: %d: empty packet\n", i);
ret =
data->comms.read(data, (uint8_t *) response, max_size,
timeout);
+ packet_disassemble(response, ret, ca_rx);
if (ret == 0) {
pr_debug("nanohub: read_msg: %d: empty packet\n", i);
} else {
int i;
uint8_t *b = (uint8_t *) response;
+
+#ifdef CONFIG_NANOHUB_MAILBOX /* remove invalid error check */
+ if ((response->reason == CMD_COMMS_READ) || (response->reason == CMD_COMMS_WRITE))
+ return ret;
+#endif
for (i = 0; i < ret; i += 25)
pr_debug(
"nanohub: %d: %d: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
#ifndef _NANOHUB_COMMS_H
#define _NANOHUB_COMMS_H
+#include <linux/semaphore.h>
+
struct __attribute__ ((__packed__)) nanohub_packet {
uint8_t sync;
uint32_t seq;
uint8_t *rx_buffer;
};
+#define NANOHUB_PACKET_SIZE(len) (sizeof(struct nanohub_packet) + (len) + sizeof(struct nanohub_packet_crc))
+#define NANOHUB_PACKET_PAYLOAD_MAX 255
+#define NANOHUB_PACKET_SIZE_MAX NANOHUB_PACKET_SIZE(NANOHUB_PACKET_PAYLOAD_MAX)
+
int nanohub_comms_kernel_download(struct nanohub_data *, const uint8_t *,
size_t);
int nanohub_comms_app_download(struct nanohub_data *, const uint8_t *, size_t);
#include <linux/semaphore.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
+#include <linux/sched/prio.h>
+#include <linux/sched/signal.h>
#include <linux/time.h>
#include <linux/platform_data/nanohub.h>
+#include <uapi/linux/sched/types.h>
#include "main.h"
#include "comms.h"
#include "bl.h"
+
+#if defined(CONFIG_NANOHUB_MAILBOX)
+#include "chub.h"
+#include "chub_dbg.h"
+#elif defined(CONFIG_SPI_MAILBOX)
#include "spi.h"
+#endif
#define READ_QUEUE_DEPTH 10
#define APP_FROM_HOST_EVENTID 0x000000F8
#define WAKEUP_ERR_TIME_NS (60LL * NSEC_PER_SEC)
#define WAKEUP_ERR_CNT 4
+#ifdef CONFIG_EXT_CHUB
/**
* struct gpio_config - this is a binding between platform data and driver data
* @label: for diagnostics
.data_off = offsetof(struct nanohub_data, name), \
.options = GPIO_OPT_HAS_IRQ | (_opts) \
+#endif
+
static int nanohub_open(struct inode *, struct file *);
static ssize_t nanohub_read(struct file *, char *, size_t, loff_t *);
static ssize_t nanohub_write(struct file *, const char *, size_t, loff_t *);
static struct class *sensor_class;
static int major;
+#ifdef CONFIG_EXT_CHUB
static const struct gpio_config gconf[] = {
{ PLAT_GPIO_DEF(nreset, GPIOF_OUT_INIT_HIGH) },
{ PLAT_GPIO_DEF(wakeup, GPIOF_OUT_INIT_HIGH) },
{ PLAT_GPIO_DEF_IRQ(irq1, GPIOF_DIR_IN, 0) },
{ PLAT_GPIO_DEF_IRQ(irq2, GPIOF_DIR_IN, GPIO_OPT_OPTIONAL) },
};
+#endif
static const struct iio_info nanohub_iio_info = {
.driver_module = THIS_MODULE,
ST_RUNNING
};
+#ifdef CONFIG_EXT_CHUB
static inline bool gpio_is_optional(const struct gpio_config *_cfg)
{
return _cfg->options & GPIO_OPT_OPTIONAL;
{
return _cfg->options & GPIO_OPT_HAS_IRQ;
}
+#endif
static inline bool nanohub_has_priority_lock_locked(struct nanohub_data *data)
{
}
}
+#ifdef CONFIG_EXT_CHUB
static inline int plat_gpio_get(struct nanohub_data *data,
const struct gpio_config *_cfg)
{
else
WARN(1, "No data binding defined for %s", _cfg->label);
}
+#endif
static inline void mcu_wakeup_gpio_set_value(struct nanohub_data *data,
int val)
{
+#ifdef CONFIG_EXT_CHUB
const struct nanohub_platform_data *pdata = data->pdata;
gpio_set_value(pdata->wakeup_gpio, val);
+#else
+ if (val)
+ contexthub_ipc_write_event(data->pdata->mailbox_client, MAILBOX_EVT_WAKEUP_CLR);
+ else
+ contexthub_ipc_write_event(data->pdata->mailbox_client, MAILBOX_EVT_WAKEUP);
+#endif
}
static inline void mcu_wakeup_gpio_get_locked(struct nanohub_data *data,
return atomic_read(&data->wakeup_lock_cnt) != 0;
}
-static inline void nanohub_handle_irq1(struct nanohub_data *data)
+inline void nanohub_handle_irq1(struct nanohub_data *data)
{
bool locked;
data->kthread_err_cnt = data->wakeup_err_cnt = 0;
}
-/* the following fragment is based on wait_event_* code from wait.h */
-#define wait_event_interruptible_timeout_locked(q, cond, tmo) \
-({ \
- long __ret = (tmo); \
- DEFINE_WAIT(__wait); \
- if (!(cond)) { \
- for (;;) { \
- __wait.flags &= ~WQ_FLAG_EXCLUSIVE; \
- if (list_empty(&__wait.task_list)) \
- __add_wait_queue_tail(&(q), &__wait); \
- set_current_state(TASK_INTERRUPTIBLE); \
- if ((cond)) \
- break; \
- if (signal_pending(current)) { \
- __ret = -ERESTARTSYS; \
- break; \
- } \
- spin_unlock(&(q).lock); \
- __ret = schedule_timeout(__ret); \
- spin_lock(&(q).lock); \
- if (!__ret) { \
- if ((cond)) \
- __ret = 1; \
- break; \
- } \
- } \
- __set_current_state(TASK_RUNNING); \
- if (!list_empty(&__wait.task_list)) \
- list_del_init(&__wait.task_list); \
- else if (__ret == -ERESTARTSYS && \
- /*reimplementation of wait_abort_exclusive() */\
- waitqueue_active(&(q))) \
- __wake_up_locked_key(&(q), TASK_INTERRUPTIBLE, \
- NULL); \
- } else { \
- __ret = 1; \
- } \
- __ret; \
-}) \
-
int request_wakeup_ex(struct nanohub_data *data, long timeout_ms,
int key, int lock_mode)
{
int ret;
ktime_t ktime_delta;
ktime_t wakeup_ktime;
-
+#ifdef CONFIG_NANOHUB_MAILBOX
+ unsigned long flag;
+ spin_lock_irqsave(&data->wakeup_wait.lock, flag);
+#else
spin_lock(&data->wakeup_wait.lock);
+#endif
mcu_wakeup_gpio_get_locked(data, priority_lock);
timeout = (timeout_ms != MAX_SCHEDULE_TIMEOUT) ?
msecs_to_jiffies(timeout_ms) :
data->wakeup_err_cnt = 0;
timeout = 0;
}
+
+#ifdef CONFIG_NANOHUB_MAILBOX
+ spin_unlock_irqrestore(&data->wakeup_wait.lock, flag);
+#else
spin_unlock(&data->wakeup_wait.lock);
+#endif
return timeout;
}
bool done;
bool priority_lock = lock_mode > LOCK_MODE_NORMAL;
+#ifdef CONFIG_NANOHUB_MAILBOX
+ unsigned long flag;
+ spin_lock_irqsave(&data->wakeup_wait.lock, flag);
+#else
spin_lock(&data->wakeup_wait.lock);
+#endif
+
done = mcu_wakeup_gpio_put_locked(data, priority_lock);
mcu_wakeup_unlock(data, key);
+
+#ifdef CONFIG_NANOHUB_MAILBOX
+ spin_unlock_irqrestore(&data->wakeup_wait.lock, flag);
+#else
spin_unlock(&data->wakeup_wait.lock);
+#endif
if (!done)
wake_up_interruptible_sync(&data->wakeup_wait);
/* release the wakeup line, and wait for nanohub to send
* us an interrupt indicating the transaction completed.
*/
+
+#ifdef CONFIG_NANOHUB_MAILBOX
+ unsigned long flag;
+ spin_lock_irqsave(&data->wakeup_wait.lock, flag);
+#else
spin_lock(&data->wakeup_wait.lock);
+#endif
+
if (mcu_wakeup_gpio_is_locked(data)) {
mcu_wakeup_gpio_set_value(data, 1);
ret = wait_event_interruptible_locked(data->wakeup_wait,
nanohub_irq1_fired(data));
mcu_wakeup_gpio_set_value(data, 0);
}
+#ifdef CONFIG_NANOHUB_MAILBOX
+ spin_unlock_irqrestore(&data->wakeup_wait.lock, flag);
+#else
spin_unlock(&data->wakeup_wait.lock);
+#endif
return ret;
}
int nanohub_wakeup_eom(struct nanohub_data *data, bool repeat)
{
int ret = -EFAULT;
+#ifdef LOWLEVEL_DEBUG
+ int wakeup_flag = 0;
+#endif
+#ifdef CONFIG_NANOHUB_MAILBOX
+ unsigned long flag;
+ spin_lock_irqsave(&data->wakeup_wait.lock, flag);
+#else
spin_lock(&data->wakeup_wait.lock);
+#endif
+
if (mcu_wakeup_gpio_is_locked(data)) {
mcu_wakeup_gpio_set_value(data, 1);
if (repeat)
mcu_wakeup_gpio_set_value(data, 0);
ret = 0;
+#ifdef LOWLEVEL_DEBUG
+ wakeup_flag = 1;
+#endif
}
+
+#ifdef CONFIG_NANOHUB_MAILBOX
+ spin_unlock_irqrestore(&data->wakeup_wait.lock, flag);
+#else
spin_unlock(&data->wakeup_wait.lock);
+#endif
return ret;
}
{
struct nanohub_data *data = dev_get_nanohub_data(dev);
const struct nanohub_platform_data *pdata = data->pdata;
+#ifdef CONFIG_NANOHUB_MAILBOX
+ struct contexthub_ipc_info *ipc;
+#endif
nanohub_clear_err_cnt(data);
if (nanohub_irq1_fired(data) || nanohub_irq2_fired(data))
wake_up_interruptible(&data->wakeup_wait);
+#ifdef CONFIG_NANOHUB_MAILBOX
+ ipc = pdata->mailbox_client;
+ return scnprintf(buf, PAGE_SIZE, "WAKEUP: %d INT1: %d INT2: %d\n",
+ atomic_read(&ipc->wakeup_chub),
+ atomic_read(&ipc->irq1_apInt), -1);
+#else
return scnprintf(buf, PAGE_SIZE, "WAKEUP: %d INT1: %d INT2: %d\n",
gpio_get_value(pdata->wakeup_gpio),
gpio_get_value(pdata->irq1_gpio),
data->irq2 ? gpio_get_value(pdata->irq2_gpio) : -1);
+#endif
}
static ssize_t nanohub_app_info(struct device *dev,
return ret;
}
+#ifdef CONFIG_EXT_CHUB
if (mode == LOCK_MODE_IO || mode == LOCK_MODE_IO_BL)
ret = nanohub_bl_open(data);
if (ret < 0) {
release_wakeup_ex(data, KEY_WAKEUP_LOCK, mode);
return ret;
}
+#endif
if (mode != LOCK_MODE_SUSPEND_RESUME)
disable_irq(data->irq1);
atomic_set(&data->lock_mode, LOCK_MODE_NONE);
if (mode != LOCK_MODE_SUSPEND_RESUME)
enable_irq(data->irq1);
+#ifdef CONFIG_EXT_CHUB
if (mode == LOCK_MODE_IO || mode == LOCK_MODE_IO_BL)
nanohub_bl_close(data);
+#endif
if (data->irq2)
enable_irq(data->irq2);
release_wakeup_ex(data, KEY_WAKEUP_LOCK, mode);
{
const struct nanohub_platform_data *pdata = data->pdata;
+#if defined(CONFIG_EXT_CHUB)
gpio_set_value(pdata->nreset_gpio, 0);
gpio_set_value(pdata->boot0_gpio, boot0 > 0);
usleep_range(30, 40);
usleep_range(70000, 75000);
else if (!boot0)
usleep_range(750000, 800000);
- nanohub_clear_err_cnt(data);
+#elif defined(CONFIG_NANOHUB_MAILBOX)
+ int ret;
+
+ if (boot0)
+ ret = contexthub_ipc_write_event(pdata->mailbox_client, MAILBOX_EVT_SHUTDOWN);
+ else
+ ret = contexthub_ipc_write_event(pdata->mailbox_client, MAILBOX_EVT_RESET);
+
+ if (ret)
+ dev_warn(data->io[ID_NANOHUB_SENSOR].dev,
+ "%s: fail to reset on boot0 %d\n", __func__, boot0);
+#endif
}
static int nanohub_hw_reset(struct nanohub_data *data)
{
int ret;
+#if defined(CONFIG_EXT_CHUB)
ret = nanohub_wakeup_lock(data, LOCK_MODE_RESET);
if (!ret) {
+ data->err_cnt = 0;
__nanohub_hw_reset(data, 0);
nanohub_wakeup_unlock(data);
}
-
+#elif defined(CONFIG_NANOHUB_MAILBOX)
+ ret = contexthub_reset(data->pdata->mailbox_client);
+#endif
return ret;
}
const char *buf, size_t count)
{
struct nanohub_data *data = dev_get_nanohub_data(dev);
+
+#ifdef CONFIG_EXT_CHUB
uint8_t status = CMD_ACK;
int ret;
if (ret < 0)
return ret;
+ data->err_cnt = 0;
__nanohub_hw_reset(data, 1);
status = nanohub_bl_erase_shared(data);
nanohub_wakeup_unlock(data);
return ret < 0 ? ret : count;
+#elif defined(CONFIG_NANOHUB_MAILBOX)
+ __nanohub_hw_reset(data, 1);
+
+ contexthub_ipc_write_event(data->pdata->mailbox_client, MAILBOX_EVT_ERASE_SHARED);
+
+ __nanohub_hw_reset(data, 0);
+ return count;
+#endif
+
}
+#ifdef CONFIG_EXT_CHUB
static ssize_t nanohub_erase_shared_bl(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
return ret < 0 ? ret : count;
}
-
+#endif
static ssize_t nanohub_download_bl(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct nanohub_data *data = dev_get_nanohub_data(dev);
+ int ret;
+
+#ifdef CONFIG_EXT_CHUB
const struct nanohub_platform_data *pdata = data->pdata;
const struct firmware *fw_entry;
- int ret;
+
uint8_t status = CMD_ACK;
+ uint32_t *buf;
ret = nanohub_wakeup_lock(data, LOCK_MODE_IO);
if (ret < 0)
return ret;
+ data->err_cnt = 0;
__nanohub_hw_reset(data, 1);
ret = request_firmware(&fw_entry, "nanohub.full.bin", dev);
nanohub_wakeup_unlock(data);
return ret < 0 ? ret : count;
+#elif defined(CONFIG_NANOHUB_MAILBOX)
+ ret = contexthub_download_bl(data->pdata->mailbox_client);
+
+ return ret < 0 ? ret : count;
+#endif
}
static ssize_t nanohub_download_kernel(struct device *dev,
const char *buf, size_t count)
{
struct nanohub_data *data = dev_get_nanohub_data(dev);
+
+#ifdef CONFIG_NANOHUB_MAILBOX
+ int ret = contexthub_download_kernel(data->pdata->mailbox_client);
+
+ return ret < 0 ? ret : count;
+#else
const struct firmware *fw_entry;
int ret;
return count;
}
-
+#endif
}
+#ifdef CONFIG_EXT_CHUB
static ssize_t nanohub_download_kernel_bl(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
return ret < 0 ? ret : count;
}
+#endif
static ssize_t nanohub_download_app(struct device *dev,
struct device_attribute *attr,
return count;
}
-
+#ifdef CONFIG_EXT_CHUB
static ssize_t nanohub_lock_bl(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
return ret < 0 ? ret : count;
}
-
+#endif
static struct device_attribute attributes[] = {
__ATTR(wakeup, 0440, nanohub_wakeup_query, NULL),
__ATTR(app_info, 0440, nanohub_app_info, NULL),
__ATTR(firmware_version, 0440, nanohub_firmware_query, NULL),
__ATTR(download_bl, 0220, NULL, nanohub_download_bl),
__ATTR(download_kernel, 0220, NULL, nanohub_download_kernel),
+#ifdef CONFIG_EXT_CHUB
__ATTR(download_kernel_bl, 0220, NULL, nanohub_download_kernel_bl),
+#endif
__ATTR(download_app, 0220, NULL, nanohub_download_app),
__ATTR(erase_shared, 0220, NULL, nanohub_erase_shared),
+#ifdef CONFIG_EXT_CHUB
__ATTR(erase_shared_bl, 0220, NULL, nanohub_erase_shared_bl),
+#endif
__ATTR(reset, 0220, NULL, nanohub_try_hw_reset),
+#ifdef CONFIG_EXT_CHUB
__ATTR(lock, 0220, NULL, nanohub_lock_bl),
__ATTR(unlock, 0220, NULL, nanohub_unlock_bl),
+#endif
};
static inline int nanohub_create_sensor(struct nanohub_data *data)
return dev->devt == *devt;
}
+/*
+ * Power on / re-enable the nanohub.  With the mailbox transport this is
+ * delegated to contexthub_poweron(); the GPIO build releases reset,
+ * waits for the MCU to boot, then re-enables the interrupt lines.
+ */
+int nanohub_reset(struct nanohub_data *data)
+{
+#ifdef CONFIG_NANOHUB_MAILBOX
+	return contexthub_poweron(data->pdata->mailbox_client);
+#else
+	const struct nanohub_platform_data *pdata = data->pdata;
+
+	gpio_set_value(pdata->nreset_gpio, 1);
+	/* give the MCU time to come out of reset before enabling IRQs */
+	usleep_range(650000, 700000);
+	enable_irq(data->irq1);
+	if (data->irq2)
+		enable_irq(data->irq2);
+	else
+		nanohub_unmask_interrupt(data, 2);
+
+	return 0;
+#endif
+}
+
static int nanohub_open(struct inode *inode, struct file *file)
{
dev_t devt = inode->i_rdev;
struct device *dev;
+#ifdef CONFIG_NANOHUB_MAILBOX
+ struct nanohub_io *io;
+#endif
dev = class_find_device(sensor_class, NULL, &devt, nanohub_match_devt);
if (dev) {
file->private_data = dev_get_drvdata(dev);
nonseekable_open(inode, file);
+#ifdef CONFIG_NANOHUB_MAILBOX
+ io = file->private_data;
+ nanohub_reset(io->data);
+#endif
return 0;
}
struct nanohub_io *io = file->private_data;
struct nanohub_data *data = io->data;
int ret;
+#ifdef CONFIG_NANOHUB_MAILBOX
+ struct contexthub_ipc_info *ipc = data->pdata->mailbox_client;
+
+ if (atomic_read(&ipc->chub_status) != CHUB_ST_RUN) {
+ dev_warn(data->io[ID_NANOHUB_SENSOR].dev,
+ "%s fails. nanohub isn't running\n", __func__);
+ return -EINVAL;
+ }
+#endif
ret = request_wakeup_timeout(data, WAKEUP_TIMEOUT_MS);
if (ret)
device_destroy(sensor_class, MKDEV(major, i));
}
+#ifdef CONFIG_EXT_CHUB
static irqreturn_t nanohub_irq1(int irq, void *dev_id)
{
struct nanohub_data *data = (struct nanohub_data *)dev_id;
return IRQ_HANDLED;
}
+#endif
static bool nanohub_os_log(char *buffer, int len)
{
return 0;
}
+#ifndef CONFIG_NANOHUB_MAILBOX
#ifdef CONFIG_OF
static struct nanohub_platform_data *nanohub_parse_dt(struct device *dev)
{
gpio_set_value(pdata->boot0_gpio, 0);
gpio_free(pdata->boot0_gpio);
}
+#endif
struct iio_dev *nanohub_probe(struct device *dev, struct iio_dev *iio_dev)
{
int ret, i;
+#ifdef CONFIG_NANOHUB_MAILBOX
+ struct nanohub_platform_data *pdata;
+#else
const struct nanohub_platform_data *pdata;
+#endif
struct nanohub_data *data;
struct nanohub_buf *buf;
bool own_iio_dev = !iio_dev;
-
pdata = dev_get_platdata(dev);
if (!pdata) {
+#ifdef CONFIG_NANOHUB_MAILBOX
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+#else
pdata = nanohub_parse_dt(dev);
+#endif
if (IS_ERR(pdata))
return ERR_PTR(PTR_ERR(pdata));
}
atomic_set(&data->wakeup_acquired, 0);
init_waitqueue_head(&data->wakeup_wait);
+#ifdef CONFIG_EXT_CHUB
ret = nanohub_request_gpios(data);
if (ret)
goto fail_gpio;
ret = nanohub_request_irqs(data);
if (ret)
goto fail_irq;
+#endif
ret = iio_device_register(iio_dev);
if (ret) {
fail_dev:
iio_device_unregister(iio_dev);
fail_irq:
+#ifdef CONFIG_EXT_CHUB
nanohub_release_gpios_irqs(data);
fail_gpio:
+ free_irq(data->irq, data);
+#endif
wake_lock_destroy(&data->wakelock_read);
vfree(buf);
fail_vma:
return ERR_PTR(ret);
}
-int nanohub_reset(struct nanohub_data *data)
-{
- const struct nanohub_platform_data *pdata = data->pdata;
-
- gpio_set_value(pdata->nreset_gpio, 1);
- usleep_range(650000, 700000);
- enable_irq(data->irq1);
- if (data->irq2)
- enable_irq(data->irq2);
- else
- nanohub_unmask_interrupt(data, 2);
-
- return 0;
-}
-
int nanohub_remove(struct iio_dev *iio_dev)
{
struct nanohub_data *data = iio_priv(iio_dev);
nanohub_destroy_devices(data);
iio_device_unregister(iio_dev);
+#ifdef CONFIG_EXT_CHUB
nanohub_release_gpios_irqs(data);
+#endif
wake_lock_destroy(&data->wakelock_read);
vfree(data->vbuf);
iio_device_free(iio_dev);
int nanohub_suspend(struct iio_dev *iio_dev)
{
+#ifdef CONFIG_EXT_CHUB
struct nanohub_data *data = iio_priv(iio_dev);
int ret;
}
return ret;
+#else
+ (void)iio_dev;
+
+ return 0;
+#endif
}
int nanohub_resume(struct iio_dev *iio_dev)
{
+#ifdef CONFIG_EXT_CHUB
struct nanohub_data *data = iio_priv(iio_dev);
disable_irq_wake(data->irq1);
nanohub_wakeup_unlock(data);
+#else
+ (void)iio_dev;
return 0;
+#endif
}
static int __init nanohub_init(void)
#include "comms.h"
#include "bl.h"
+#include "chub.h"
#define NANOHUB_NAME "nanohub"
struct nanohub_io io[ID_NANOHUB_MAX];
struct nanohub_comms comms;
+#ifdef CONFIG_NANOHUB_MAILBOX
+ struct nanohub_platform_data *pdata;
+#else
struct nanohub_bl bl;
const struct nanohub_platform_data *pdata;
+#endif
int irq1;
int irq2;
LOCK_MODE_SUSPEND_RESUME,
};
+#ifndef CONFIG_NANOHUB_MAILBOX
+/*
+ * wait_event_interruptible_timeout_locked() - variant of
+ * wait_event_interruptible_timeout() for callers that already hold
+ * q.lock.  The lock is dropped only around schedule_timeout() and
+ * re-acquired before returning, so @cond is always evaluated with the
+ * lock held.
+ *
+ * Returns:
+ *   >0            @cond became true (remaining jiffies, or 1 when the
+ *                 timeout expired but @cond turned true at the last check)
+ *   0             timed out with @cond still false
+ *   -ERESTARTSYS  interrupted by a signal; in that case any pending
+ *                 wakeup is passed on to another waiter on @q
+ */
+#define wait_event_interruptible_timeout_locked(q, cond, tmo) \
+({ \
+	long __ret = (tmo); \
+	DEFINE_WAIT(__wait); \
+	if (!(cond)) { \
+		for (;;) { \
+			__wait.flags &= ~WQ_FLAG_EXCLUSIVE; \
+			if (list_empty(&__wait.entry)) \
+				__add_wait_queue_entry_tail(&(q), &__wait); \
+			set_current_state(TASK_INTERRUPTIBLE); \
+			if ((cond)) \
+				break; \
+			if (signal_pending(current)) { \
+				__ret = -ERESTARTSYS; \
+				break; \
+			} \
+			spin_unlock(&(q).lock); \
+			__ret = schedule_timeout(__ret); \
+			spin_lock(&(q).lock); \
+			if (!__ret) { \
+				if ((cond)) \
+					__ret = 1; \
+				break; \
+			} \
+		} \
+		__set_current_state(TASK_RUNNING); \
+		if (!list_empty(&__wait.entry)) \
+			list_del_init(&__wait.entry); \
+		else if (__ret == -ERESTARTSYS && \
+			/* reimplementation of wait_abort_exclusive() */ \
+			waitqueue_active(&(q))) \
+			__wake_up_locked_key(&(q), TASK_INTERRUPTIBLE, \
+			NULL); \
+	} else { \
+		__ret = 1; \
+	} \
+	__ret; \
+})
+#endif
+
int request_wakeup_ex(struct nanohub_data *data, long timeout,
int key, int lock_mode);
void release_wakeup_ex(struct nanohub_data *data, int key, int lock_mode);
int nanohub_remove(struct iio_dev *iio_dev);
int nanohub_suspend(struct iio_dev *iio_dev);
int nanohub_resume(struct iio_dev *iio_dev);
+void nanohub_handle_irq1(struct nanohub_data *data);
+#ifdef CONFIG_EXT_CHUB
static inline int nanohub_irq1_fired(struct nanohub_data *data)
{
const struct nanohub_platform_data *pdata = data->pdata;
return data->irq2 && !gpio_get_value(pdata->irq2_gpio);
}
+#else
+/*
+ * Mailbox transport: "IRQ1 fired" state is tracked via the irq1_apInt
+ * flag in the contexthub IPC state instead of sampling a GPIO line.
+ * NOTE(review): polarity assumed from the !atomic_read() test — a zero
+ * flag is treated as "fired"; confirm against the contexthub_ipc_info
+ * definition in chub.h.
+ */
+static inline int nanohub_irq1_fired(struct nanohub_data *data)
+{
+	struct contexthub_ipc_info *ipc = data->pdata->mailbox_client;
+
+	return !atomic_read(&ipc->irq1_apInt);
+}
+
+/* The mailbox transport has no second interrupt line, so it never fires. */
+static inline int nanohub_irq2_fired(struct nanohub_data *data)
+{
+	return 0;
+}
+#endif
static inline int request_wakeup_timeout(struct nanohub_data *data, int timeout)
{
size_t length;
};
+/* maximum length (bytes) for file-name style identifiers in this driver */
+#define MAX_FILE_LEN (32)
+
struct nanohub_platform_data {
+#ifdef CONFIG_NANOHUB_MAILBOX
+	/*
+	 * Opaque handle to the contexthub IPC state; consumers cast it to
+	 * struct contexthub_ipc_info * (see nanohub_irq1_fired() in main.h).
+	 */
+	void *mailbox_client;
+	int irq;
+#else
+	/* GPIO-based transport (SPI/I2C): pin numbers and flash layout. */
	u32 wakeup_gpio;
	u32 nreset_gpio;
	u32 boot0_gpio;
	struct nanohub_flash_bank *flash_banks;
	u32 num_shared_flash_banks;
	struct nanohub_flash_bank *shared_flash_banks;
+#endif
};
#endif /* __LINUX_PLATFORM_DATA_NANOHUB_H */