This patch adds support for CHUB debug features such as dump_hw and dump_log.

Errors are now counted per CHUB_ERR_TYPE, the debug work dumps hw/sram and
logs to files and resets the chub when needed, and a watchdog (WDT)
interrupt handler is added to trigger that path.
Change-Id: I798d964a433f8ab442b4b91c35d92340536e7b4a
Signed-off-by: Boojin Kim <boojin.kim@samsung.com>
enum { CHUB_ON, CHUB_OFF };
enum { C2A_ON, C2A_OFF };
-#ifdef CONFIG_CONTEXTHUB_POWERMODE
/* host interface functions */
int contexthub_is_run(struct contexthub_ipc_info *ipc)
{
+ if (!ipc->powermode)
+ return 1;
+
#ifdef CONFIG_CHRE_SENSORHUB_HAL
return nanohub_irq1_fired(ipc->data);
#else
/* request contexthub to host driver */
int contexthub_request(struct contexthub_ipc_info *ipc)
{
+ if (!ipc->powermode)
+ return 0;
+
#ifdef CONFIG_CHRE_SENSORHUB_HAL
return request_wakeup_timeout(ipc->data, WAIT_TIMEOUT_MS);
#else
-/* rlease contexthub to host driver */
+/* release contexthub to host driver */
void contexthub_release(struct contexthub_ipc_info *ipc)
{
+ if (!ipc->powermode)
+ return;
+
#ifdef CONFIG_CHRE_SENSORHUB_HAL
release_wakeup(ipc->data);
#endif
}
-#endif
static inline void contexthub_notify_host(struct contexthub_ipc_info *ipc)
{
#endif
chub_dbg_init(chub_dev);
- /* chub err init */
- for (i = 0; i < CHUB_ERR_MAX; i++)
- chub->err_cnt[i] = 0;
-
dev_info(chub_dev,
"IPC map information\n\tinfo(base:%p size:%zu)\n\tipc(base:%p size:%zu)\n\tlogbuf(base:%p size:%d)\n",
chub, sizeof(struct contexthub_ipc_info),
return 0;
}
-static void chub_dump_and_reset(struct contexthub_ipc_info *ipc,
- enum CHUB_ERR_TYPE err)
-{
- int ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_DUMP_STATUS);
- int hang;
-
- if (ret)
- dev_dbg(ipc->dev, "%s: fails to dump\n", __func__);
-
- chub_dbg_dump_hw(ipc, err);
-
- if (err != CHUB_ERR_CHUB_NO_RESPONSE) {
- hang = contexthub_lowlevel_alive(ipc);
- if (!hang) {
-#ifdef DEBUG_IMAGE
- chub_dbg_check_and_download_image(ipc);
-#endif
- }
- }
-
- /* reset chub */
- contexthub_reset(ipc);
-}
-
#ifdef PACKET_LOW_DEBUG
static void debug_dumpbuf(unsigned char *buf, int len)
{
ap_t = rtc_tm_to_time64(&ap_tm);
}
+/* simple alive check function */
+static bool contexthub_lowlevel_alive(struct contexthub_ipc_info *ipc)
+{
+ ipc->chub_alive_lock.flag = 0;
+ ipc_hw_gen_interrupt(AP, IRQ_EVT_CHUB_ALIVE);
+ wait_event_timeout(ipc->chub_alive_lock.event,
+ ipc->chub_alive_lock.flag,
+ msecs_to_jiffies(WAIT_TIMEOUT_MS));
+
+ return ipc->chub_alive_lock.flag;
+}
+
static int contexthub_wait_alive(struct contexthub_ipc_info *ipc)
{
int trycnt = 0;
u32 val;
int trycnt = 0;
int ret = 0;
+ int i;
/* clear ipc value */
ipc_init();
atomic_set(&ipc->irq1_apInt, C2A_OFF);
atomic_set(&ipc->read_lock.cnt, 0x0);
+ /* chub err init */
+ for (i = 0; i < CHUB_ERR_MAX; i++)
+ ipc->err_cnt[i] = 0;
+
ipc->read_lock.flag = 0;
#ifndef USE_IPC_BUF
ipc->recv_order.order = 0;
REG_CHUB_RESET_CHUB_CONFIGURATION);
msleep(WAIT_TIMEOUT_MS);
if (++trycnt > WAIT_TRY_CNT) {
- dev_warn(ipc->dev, "chub cpu status is not set correctly\n");
+ dev_warn(ipc->dev,
+ "chub cpu status is not set correctly\n");
break;
}
} while ((val & 0x1) == 0x0);
atomic_set(&ipc->chub_status, CHUB_ST_SHUTDOWN);
break;
case MAILBOX_EVT_CHUB_ALIVE:
- ipc->chub_alive_lock.flag = 0;
- ipc_hw_gen_interrupt(AP, IRQ_EVT_CHUB_ALIVE);
- val = wait_event_timeout(ipc->chub_alive_lock.event,
- ipc->chub_alive_lock.flag,
- msecs_to_jiffies(WAIT_TIMEOUT_MS));
-
- if (ipc->chub_alive_lock.flag) {
+ val = contexthub_lowlevel_alive(ipc);
+ if (val) {
atomic_set(&ipc->chub_status, CHUB_ST_RUN);
dev_info(ipc->dev, "chub is alive");
} else {
return ret;
}
- /* simple alive check function */
-
-int contexthub_lowlevel_alive(struct contexthub_ipc_info *ipc)
-{
- int val;
-
- ipc->chub_alive_lock.flag = 0;
-
- ipc_hw_gen_interrupt(AP, IRQ_EVT_CHUB_ALIVE);
- val = wait_event_timeout(ipc->chub_alive_lock.event,
- ipc->chub_alive_lock.flag,
- msecs_to_jiffies(WAIT_TIMEOUT_MS));
-
- return !(ipc->chub_alive_lock.flag);
-}
-
int contexthub_poweron(struct contexthub_ipc_info *ipc)
{
int ret = 0;
int ret;
/* TODO: add wait lock */
+ dev_info(ipc->dev, "%s\n", __func__);
ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_SHUTDOWN);
if (ret) {
- pr_err("%s: shutdonw fails, ret:%d\n", __func__, ret);
+ pr_err("%s: shutdown fails, ret:%d\n", __func__, ret);
struct contexthub_ipc_info *ipc =
container_of(work, struct contexthub_ipc_info, debug_work);
enum ipc_debug_event event = ipc_read_debug_event(AP);
- int i;
- enum CHUB_ERR_TYPE fw_err = 0;
+ bool need_reset = 0;
+ bool alive = contexthub_lowlevel_alive(ipc);
+ int err = 0;
+ int ret;
+ int i;
- dev_info(ipc->dev,
- "%s is run with nanohub driver %d, fw %d error\n", __func__,
- ipc->chub_err, event);
+ dev_info(ipc->dev, "%s: fw_err:%d, alive:%d\n",
+ __func__, event, alive);
+
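+ /* chub hang or watchdog timeout: dump and reset without checking error counts */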
+ if (!alive || ipc->err_cnt[CHUB_ERR_NANOHUB_WDT]) {
+ need_reset = 1;
+ err = CHUB_ERR_NANOHUB_WDT;
+ goto do_reset;
+ }
- /* do slient reset */
for (i = 0; i < CHUB_ERR_MAX; i++) {
+ if (ipc->err_cnt[i])
+ dev_info(ipc->dev, "%s: err%d-%d\n",
+ __func__, i, ipc->err_cnt[i]);
+ /* reset chub if the error count exceeds MAX_ERR_CNT */
if (ipc->err_cnt[i] > MAX_ERR_CNT) {
- pr_info("%s: reset chub due to irq trigger error:%d\n",
- __func__, i);
- chub_dump_and_reset(ipc, i);
- return;
+ err = i;
+ need_reset = 1;
}
}
- log_flush(ipc->fw_log);
-
- /* chub driver error */
- if (ipc->chub_err) {
- log_dump_all(ipc->chub_err);
- chub_dbg_dump_hw(ipc, ipc->chub_err);
- ipc->chub_err = 0;
- return;
- }
+ if (need_reset)
+ goto do_reset;
+ log_flush(ipc->fw_log);
/* chub fw error */
switch (event) {
case IPC_DEBUG_CHUB_FULL_LOG:
break;
case IPC_DEBUG_CHUB_FAULT:
dev_warn(ipc->dev, "Contexthub notified fault\n");
- fw_err = CHUB_ERR_NANOHUB_FAULT;
+ err = CHUB_ERR_NANOHUB_FAULT;
break;
case IPC_DEBUG_CHUB_ASSERT:
dev_warn(ipc->dev, "Contexthub notified assert\n");
- fw_err = CHUB_ERR_NANOHUB_ASSERT;
+ err = CHUB_ERR_NANOHUB_ASSERT;
break;
case IPC_DEBUG_CHUB_ERROR:
dev_warn(ipc->dev, "Contexthub notified error\n");
- fw_err = CHUB_ERR_NANOHUB_ERROR;
+ err = CHUB_ERR_NANOHUB_ERROR;
break;
default:
break;
}
- if (fw_err) {
+ if (event) /* clear dbg event */
+ ipc_write_debug_event(AP, 0);
+ if (err)
- ipc->err_cnt[fw_err]++;
+ ipc->err_cnt[err]++;
- contexthub_ipc_write_event(ipc, MAILBOX_EVT_DUMP_STATUS);
- log_dump_all(fw_err);
+
+do_reset:
+ dev_info(ipc->dev, "%s: err:%d, alive:%d, need_reset:%d\n",
+ __func__, err, alive, need_reset);
+ /* request chub fw to dump its status */
+ ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_DUMP_STATUS);
+ /* dump log into file */
+ log_dump_all(err);
+ /* dump hw & sram into file */
+ chub_dbg_dump_hw(ipc, err);
+ if (need_reset) {
+ ret = contexthub_reset(ipc);
+ if (ret)
+ dev_warn(ipc->dev, "%s: fails to reset %d.\n",
+ __func__, ret);
+ else {
+ /* TODO: recovery */
+ dev_info(ipc->dev, "%s: chub reset done, needs recovery\n",
+ __func__);
+ if (err == CHUB_ERR_NANOHUB_WDT)
+ enable_irq(ipc->irq_wdt);
+ }
}
}
}
if (err) {
- ipc->chub_err = err;
pr_err("inval irq err(%d):start_irqnum:%d,evt(%p):%d,irq_hw:%d,status_reg:0x%x(0x%x,0x%x)\n",
- ipc->chub_err, start_index, cur_evt, evt, irq_num,
+ err, start_index, cur_evt, evt, irq_num,
status, ipc_hw_read_int_status_reg(AP),
ipc_hw_read_int_gen_reg(AP));
ipc->err_cnt[err]++;
{
struct contexthub_ipc_info *ipc = data;
- dev_info(ipc->dev, "context generated WDT timeout.\n");
+ dev_info(ipc->dev, "%s: watchdog timeout\n", __func__);
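+ /* count the WDT error; debug_work dumps logs/hw and resets chub */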
+ ipc->err_cnt[CHUB_ERR_NANOHUB_WDT]++;
+ disable_irq_nosync(ipc->irq_wdt);
+ schedule_work(&ipc->debug_work);
return IRQ_HANDLED;
}
chub->block_reset = 0;
/* get mailbox interrupt */
- irq = irq_of_parse_and_map(node, 0);
- if (irq < 0) {
+ chub->irq_mailbox = irq_of_parse_and_map(node, 0);
+ if (chub->irq_mailbox <= 0) {
- dev_err(dev, "failed to get irq:%d\n", irq);
+ dev_err(dev, "failed to get irq:%d\n", chub->irq_mailbox);
return -EINVAL;
}
/* request irq handler */
- ret = devm_request_irq(dev, irq, contexthub_irq_handler,
+ ret = devm_request_irq(dev, chub->irq_mailbox, contexthub_irq_handler,
0, dev_name(dev), chub);
if (ret) {
- dev_err(dev, "failed to request irq:%d, ret:%d\n", irq, ret);
+ dev_err(dev, "failed to request irq:%d, ret:%d\n",
+ chub->irq_mailbox, ret);
return ret;
}
/* get wdt interrupt optionally */
- irq = irq_of_parse_and_map(node, 1);
- if (irq > 0) {
+ chub->irq_wdt = irq_of_parse_and_map(node, 1);
+ if (chub->irq_wdt > 0) {
/* request irq handler */
- ret = devm_request_irq(dev, irq,
+ ret = devm_request_irq(dev, chub->irq_wdt,
contexthub_irq_wdt_handler, 0,
dev_name(dev), chub);
if (ret) {
dev_err(dev, "failed to request wdt irq:%d, ret:%d\n",
- irq, ret);
+ chub->irq_wdt, ret);
return ret;
}
} else {
#endif
atomic_set(&chub->chub_status, CHUB_ST_NO_POWER);
- chub->chub_err = 0;
- chub->powermode = INIT_CHUB_VAL;
+ chub->powermode = 0; /* updated by fw bl */
chub->dev = &pdev->dev;
platform_set_drvdata(pdev, chub);
contexthub_config_init(chub);
};
enum CHUB_ERR_TYPE {
+ CHUB_ERR_NONE,
CHUB_ERR_EVTQ_EMTPY, /* ap error */
CHUB_ERR_READ_FAIL,
CHUB_ERR_WRITE_FAIL,
CHUB_ERR_NANOHUB_FAULT, /* chub error */
CHUB_ERR_NANOHUB_ASSERT,
CHUB_ERR_NANOHUB_ERROR,
+ CHUB_ERR_NANOHUB_WDT,
CHUB_ERR_MAX,
};
struct log_buffer_info *dd_log;
struct LOG_BUFFER *dd_log_buffer;
unsigned long clkrate;
- enum CHUB_ERR_TYPE chub_err;
atomic_t chub_status;
atomic_t irq1_apInt;
atomic_t wakeup_chub;
+ int irq_mailbox;
+ int irq_wdt;
int err_cnt[CHUB_ERR_MAX];
int utc_run;
int powermode;
uint8_t *rx, int max_length, int timeout);
int contexthub_ipc_write(struct contexthub_ipc_info *ipc,
uint8_t *tx, int length, int timeout);
-int contexthub_lowlevel_alive(struct contexthub_ipc_info *ipc);
int contexthub_poweron(struct contexthub_ipc_info *data);
int contexthub_download_image(struct contexthub_ipc_info *data, int bl);
int contexthub_download_kernel(struct contexthub_ipc_info *dev);
int contexthub_reset(struct contexthub_ipc_info *data);
int contexthub_wakeup(struct contexthub_ipc_info *data, int evt);
-#ifdef CONFIG_CONTEXTHUB_POWERMODE
int contexthub_is_run(struct contexthub_ipc_info *ipc);
int contexthub_request(struct contexthub_ipc_info *ipc);
void contexthub_release(struct contexthub_ipc_info *ipc);
-#else
-#define contexthub_is_run(a) (1)
-#define contexthub_request(a) (0)
-#define contexthub_release(a)
-#endif
#endif
#define AREA_NAME_MAX (8)
/* it's align ramdump side to prevent override */
#define SRAM_ALIGN (1024)
+#define S_IRWUG (0660) /* rw for owner and group */
struct map_info {
char name[AREA_NAME_MAX];
};
#ifdef CONFIG_CONTEXTHUB_DEBUG
-static void chub_dbg_write_file(struct device *dev)
+static void chub_dbg_write_file(struct device *dev, char *name,
+ void *buf, int size)
{
struct file *filp;
char file_name[32];
struct dbg_dump *p_dump = p_dbg_dump;
u32 sec = p_dump->time / NSEC_PER_SEC;
- snprintf(file_name, sizeof(file_name), "/data/nano-%02u-%06u.dump",
- p_dump->reason, sec);
+ snprintf(file_name, sizeof(file_name), "/data/nano-%02u-%06u-%s.dump",
+ p_dump->reason, sec, name);
old_fs = get_fs();
set_fs(KERNEL_DS);
- filp = filp_open(file_name, O_RDWR | O_TRUNC | O_CREAT, 0660);
-
- dev_dbg(dev, "%s is created with %d size\n", file_name,
- get_dbg_dump_size());
-
+ filp = filp_open(file_name, O_RDWR | O_TRUNC | O_CREAT, S_IRWUG);
if (IS_ERR(filp)) {
- dev_warn(dev, "%s: saving log fail\n", __func__);
+ dev_warn(dev, "%s: open file fail\n", __func__);
goto out;
}
- vfs_write(filp, (void *)p_dbg_dump, sizeof(struct dbg_dump),
- &filp->f_pos);
+ vfs_write(filp, buf, size, &filp->f_pos);
vfs_fsync(filp, 0);
filp_close(filp, NULL);
- snprintf(file_name, sizeof(file_name), "/data/nano-%02u-%06u-sram.dump",
- p_dump->reason, sec);
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- filp = filp_open(file_name, O_RDWR | O_TRUNC | O_CREAT, 0660);
-
- dev_dbg(dev, "%s is created with %d size\n", file_name,
- get_dbg_dump_size());
+ dev_dbg(dev, "%s is created with %d size\n", file_name, size);
-
- if (IS_ERR(filp)) {
- dev_warn(dev, "%s: saving log fail\n", __func__);
- goto out;
- }
-
- vfs_write(filp, &p_dbg_dump->sram[p_dbg_dump->sram_start],
- ipc_get_chub_mem_size(), &filp->f_pos);
- vfs_fsync(filp, 0);
- filp_close(filp, NULL);
-
out:
set_fs(old_fs);
}
/* dump GPR */
chub_dbg_dump_gpr(ipc);
-
- /* dump SRAM */
+ /* dump SRAM to reserved DRAM */
memcpy_fromio(&p_dbg_dump->sram[p_dbg_dump->sram_start],
ipc_get_base(IPC_REG_DUMP),
ipc_get_chub_mem_size());
- dev_dbg(ipc->dev, "contexthub dump is done\n");
-
- chub_dbg_write_file(ipc->dev);
+ /* write file */
+ dev_dbg(ipc->dev,
+ "%s: write file: sram:%p, dram:%p(off:%d), size:%d\n",
+ __func__, ipc_get_base(IPC_REG_DUMP),
+ &p_dbg_dump->sram[p_dbg_dump->sram_start],
+ p_dbg_dump->sram_start, ipc_get_chub_mem_size());
+ chub_dbg_write_file(ipc->dev, "dram",
+ p_dbg_dump, sizeof(struct dbg_dump));
+ chub_dbg_write_file(ipc->dev, "sram",
+ &p_dbg_dump->sram[p_dbg_dump->sram_start],
+ ipc_get_chub_mem_size());
}
-
contexthub_release(ipc);
}
[IPC_DEBUG_UTC_CHECK_STATUS] = "stack",
[IPC_DEBUG_UTC_CHECK_CPU_UTIL] = "utilization",
[IPC_DEBUG_UTC_HEAP_DEBUG] = "heap",
+ [IPC_DEBUG_UTC_HANG] = "hang",
[IPC_DEBUG_NANOHUB_CHUB_ALIVE] = "alive",
};
IPC_DEBUG_UTC_CHECK_STATUS,
IPC_DEBUG_UTC_CHECK_CPU_UTIL,
IPC_DEBUG_UTC_HEAP_DEBUG,
+ IPC_DEBUG_UTC_HANG,
IPC_DEBUG_UTC_IPC_TEST_START,
IPC_DEBUG_UTC_IPC_TEST_END,
IPC_DEBUG_UTC_MAX,
struct ipc_logbuf {
u32 token;
- u32 eq; /* write owner chub */
- u32 dq; /* read onwer ap */
+ u32 eq; /* write owner chub (index_writer) */
+ u32 dq; /* read owner ap (index_reader) */
u32 size;
char buf[0];
};
size_t left_size = SIZE_OF_BUFFER - kernel_buffer->index;
dev_dbg(info->dev, "%s(%zu)\n", __func__, size);
-
if (size > SIZE_OF_BUFFER) {
dev_warn(info->dev,
"flush size (%zu, %zu) is bigger than kernel buffer size (%d)",
size = SIZE_OF_BUFFER;
}
- if (log_auto_save)
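+ /* open the save file only once and keep appending */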
+ if (log_auto_save && !info->filp)
info->filp = filp_open(info->save_file_name, O_RDWR | O_APPEND | O_CREAT, S_IRWUG);
if (left_size < size) {
vfs_write(info->filp, kernel_buffer->buffer + kernel_buffer->index, left_size, &info->filp->f_pos);
vfs_fsync(info->filp, 0);
}
-
src += left_size;
size -= left_size;
pr_warn("%s: chub isn't run\n", __func__);
return;
}
-
log_flush(info);
}
}
return dbg_root_dir;
}
-void chub_log_auto_save_open(struct log_buffer_info *info)
+static void chub_log_auto_save_open(struct log_buffer_info *info)
{
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
- if (info->filp) {
- /* close previous */
+ /* close previous */
+ if (info->filp)
filp_close(info->filp, NULL);
- }
info->filp =
filp_open(info->save_file_name, O_RDWR | O_TRUNC | O_CREAT,
S_IRWUG);
- dev_dbg(info->dev, "created\n");
+ dev_dbg(info->dev, "%s created\n", info->save_file_name);
if (IS_ERR(info->filp))
dev_warn(info->dev, "%s: saving log fail\n", __func__);
(u32)(sched_clock() / NSEC_PER_SEC));
chub_log_auto_save_open(info);
-
log_auto_save = 1;
} else {
log_auto_save = 0;
long event;
int err;
+ /* auto log_save */
err = kstrtol(&buf[0], 10, &event);
if (!err) {
struct log_buffer_info *info;
list_for_each_entry(info, &log_list_head, list)
- if (info->support_log_save)
+ if (info->support_log_save) /* sram can support it */
chub_log_auto_save_ctrl(info, event);
+ /* set log_flush to save log */
if (!auto_log_flush_ms) {
log_schedule_flush_all();
auto_log_flush_ms = DEFAULT_FLUSH_MS;
+ dev_dbg(dev, "%s: set log_flush time(%dms) for log_save\n",
+ __func__, auto_log_flush_ms);
}
-
return count;
} else {
return 0;
mm_segment_t old_fs;
char save_file_name[32];
struct LOG_BUFFER *buffer = info->log_buffer;
- u32 index = buffer->index_writer;
+ u32 wrap_index = buffer->index_writer;
snprintf(save_file_name, sizeof(save_file_name),
"/data/nano-%02d-%02d-%06u.log", info->id, err,
(u32)(sched_clock() / NSEC_PER_SEC));
+ old_fs = get_fs();
set_fs(KERNEL_DS);
filp = filp_open(save_file_name, O_RDWR | O_TRUNC | O_CREAT, S_IRWUG);
-
- old_fs = get_fs();
+ if (IS_ERR(filp)) {
+ dev_warn(info->dev, "%s: fails to open %s\n", __func__, save_file_name);
+ goto out;
+ }
if (info->sram_log_buffer) {
int i;
int size;
bool wrap = false;
char tmp_buffer[TMP_BUFFER_SIZE];
- u32 start_index = index;
-
- for (i = 0; i < buffer->size / TMP_BUFFER_SIZE + 1;
+ u32 start_index = wrap_index;
+ int bottom = 0;
+
+ /* dump sram-log buffer to fs (eq ~ eq + logbuf_size) */
+ dev_dbg(info->dev, "%s: logbuf:%p, eq:%d, dq:%d, size:%d, loop:%d\n", __func__,
+ (void *)buffer, wrap_index, buffer->index_reader, buffer->size,
+ (buffer->size / TMP_BUFFER_SIZE) + 1);
+ for (i = 0; i < (buffer->size / TMP_BUFFER_SIZE) + 1;
i++, start_index += TMP_BUFFER_SIZE) {
if (start_index + TMP_BUFFER_SIZE > buffer->size) {
size = buffer->size - start_index;
wrap = true;
- } else if (index - start_index < TMP_BUFFER_SIZE) {
- size = index - start_index;
+ bottom = 1;
+ } else if (bottom && (wrap_index - start_index < TMP_BUFFER_SIZE)) {
+ size = wrap_index - start_index;
} else {
size = TMP_BUFFER_SIZE;
}
-
- memcpy(tmp_buffer, buffer->buffer + start_index, size);
+ memcpy_fromio(tmp_buffer, buffer->buffer + start_index, size);
vfs_write(filp, tmp_buffer, size, &filp->f_pos);
-
if (wrap) {
wrap = false;
start_index = 0;
}
}
} else {
- vfs_write(filp, buffer->buffer + index, buffer->size - index,
+ vfs_write(filp, buffer->buffer + wrap_index, buffer->size - wrap_index,
&filp->f_pos);
- vfs_write(filp, buffer->buffer, index, &filp->f_pos);
+ vfs_write(filp, buffer->buffer, wrap_index, &filp->f_pos);
}
-
- dev_info(info->dev, "%s is created\n", save_file_name);
+ dev_dbg(info->dev, "%s is created\n", save_file_name);
vfs_fsync(filp, 0);
filp_close(filp, NULL);
+
+out:
+ set_fs(old_fs);
}
void log_dump_all(int err)
pr_err("%s: fails to flush log\n", __func__);
}
}
+ /* update log_flush time */
auto_log_flush_ms = event * 1000;
return count;
}
static struct device_attribute attributes[] = {
+ /* enable auto-save with flush_log */
__ATTR(save_log, 0664, chub_log_save_show, chub_log_save_save),
+ /* flush sram-logbuf to dram */
__ATTR(flush_log, 0664, chub_log_flush_show, chub_log_flush_save),
+ /* dump sram-logbuf to file */
__ATTR(dump_log, 0220, NULL, chub_dump_log_save)
};