From 72ea17d8ce040253c06a99bd7c2e41ad5cf119e2 Mon Sep 17 00:00:00 2001
From: Jaejoon Yoo
Date: Thu, 28 Jun 2018 19:35:00 +0900
Subject: [PATCH] [COMMON] soc: samsung: introduce BCMDBG driver

Change-Id: I3ae0b0bf986a8123cca91a24758817449cc751ed
Signed-off-by: Jaejoon Yoo
---
 drivers/soc/samsung/Kconfig                   |   21 +
 drivers/soc/samsung/Makefile                  |    3 +-
 drivers/soc/samsung/exynos-bcm_dbg-dt.c       |  702 ++++
 drivers/soc/samsung/exynos-bcm_dbg-dump.c     |  126 +
 drivers/soc/samsung/exynos-bcm_dbg.c          | 3413 +++++++++++++++++
 drivers/soc/samsung/exynos-pd.c               |   14 +-
 .../dt-bindings/soc/samsung/exynos-bcm_dbg.h  |   47 +
 include/soc/samsung/exynos-bcm_dbg-dt.h       |   16 +
 include/soc/samsung/exynos-bcm_dbg-dump.h     |   42 +
 include/soc/samsung/exynos-bcm_dbg.h          |  255 ++
 include/soc/samsung/exynos-pd.h               |    6 +
 11 files changed, 4640 insertions(+), 5 deletions(-)
 create mode 100644 drivers/soc/samsung/exynos-bcm_dbg-dt.c
 create mode 100644 drivers/soc/samsung/exynos-bcm_dbg-dump.c
 create mode 100644 drivers/soc/samsung/exynos-bcm_dbg.c
 create mode 100644 include/dt-bindings/soc/samsung/exynos-bcm_dbg.h
 create mode 100644 include/soc/samsung/exynos-bcm_dbg-dt.h
 create mode 100644 include/soc/samsung/exynos-bcm_dbg-dump.h
 create mode 100644 include/soc/samsung/exynos-bcm_dbg.h

diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 06e6fbbc3bdb..e77ba80996ad 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -10,6 +10,27 @@ config EXYNOS_CHIPID
 	depends on ARCH_EXYNOS
 	select SOC_BUS
 
+config EXYNOS_BCM_DBG
+	bool "EXYNOS_BCM_DBG driver support"
+	default y
+	help
+	  Enable exynos-bcm_dbg driver support
+
+config EXYNOS_BCM_DBG_GNR
+	bool "EXYNOS_BCM_DBG general interface support"
+	depends on !EXYNOS_ADV_TRACER
+	default y
+	help
+	  Enable exynos-bcm_dbg general interface support.
+	  The general interface does not use the ADV-tracer IPC interface.
+
+config EXYNOS_BCM_DBG_DUMP
+	bool "EXYNOS_BCM_DBG dump support"
+	depends on EXYNOS_BCM_DBG
+	default y if EXYNOS_BCM_DBG
+	help
+	  Enable exynos-bcm_dbg dump support
+
 config EXYNOS_BCM
 	bool "EXYNOS_BCM driver support"
 	help
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index 6254e36ea1f1..9d97ff810aae 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -5,7 +5,6 @@ obj-$(CONFIG_ARCH_EXYNOS) += exynos-smc.o
 obj-$(CONFIG_CAL_IF) += cal-if/
 
 obj-$(CONFIG_ECT) += ect_parser.o
 
-obj-$(CONFIG_EXYNOS_BCM) += exynos-bcm.o
 
 obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
@@ -14,6 +13,8 @@ obj-$(CONFIG_ARCH_EXYNOS) += exynos-fsys0-tcxo.o
 obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
 				exynos5250-pmu.o exynos5420-pmu.o
 
+obj-$(CONFIG_EXYNOS_BCM_DBG) += exynos-bcm_dbg.o exynos-bcm_dbg-dt.o
+obj-$(CONFIG_EXYNOS_BCM_DBG_DUMP) += exynos-bcm_dbg-dump.o
 
 obj-$(CONFIG_EXYNOS_CHIPID) += exynos-chipid.o
diff --git a/drivers/soc/samsung/exynos-bcm_dbg-dt.c b/drivers/soc/samsung/exynos-bcm_dbg-dt.c
new file mode 100644
index 000000000000..bf00c145d4e3
--- /dev/null
+++ b/drivers/soc/samsung/exynos-bcm_dbg-dt.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */ + +#include +#include +#include +#include +#include + +#include +#include + +static const char *list[BCM_PD_INFO_MAX]; + +static void print_bcm_dbg_data(struct exynos_bcm_dbg_data *data) +{ + int i, j; + + BCM_DBG("IPC node name: %s\n", data->ipc_node->name); + BCM_DBG("\n"); + + BCM_DBG("pd_size: %u, pd_sync_init: %d\n", data->pd_size, data->pd_sync_init); + for (i = 0; i < data->pd_size; i++) + BCM_DBG("pd_name: %s, pd_index: %u, pd_on: %d, cal_pdid: 0x%08x\n", + data->pd_info[i]->pd_name, data->pd_info[i]->pd_index, + data->pd_info[i]->on, data->pd_info[i]->cal_pdid); + BCM_DBG("\n"); + + for (i = 0; i < data->define_event_max; i++) { + BCM_DBG("Pre-defined Event index: %u\n", data->define_event[i].index); + for (j = 0; j < data->bcm_cnt_nr; j++) + BCM_DBG(" Event[%d]: 0x%02x\n", j, data->define_event[i].event[j]); + } + BCM_DBG("\n"); + + BCM_DBG("Default Pre-defined Event index: %u\n", data->default_define_event); + BCM_DBG("Pre-defined Event Max NR: %u\n", data->define_event_max); + BCM_DBG("\n"); + + for (i = 0; i < data->define_event_max; i++) { + BCM_DBG("Pre-defined Event index: %u\n", data->define_event[i].index); + BCM_DBG(" Filter ID mask: 0x%08x\n", data->define_filter_id[i].sm_id_mask); + BCM_DBG(" Filter ID value: 0x%08x\n", data->define_filter_id[i].sm_id_value); + BCM_DBG(" Filter ID active\n"); + for (j = 0; j < data->bcm_cnt_nr; j++) + BCM_DBG(" Event[%d]: %u\n", j, data->define_filter_id[i].sm_id_active[j]); + } + BCM_DBG("\n"); + + for (i = 0; i < data->define_event_max; i++) { + BCM_DBG("Pre-defined Event index: %u\n", data->define_event[i].index); + for (j = 0; j < BCM_EVT_FLT_OTHR_MAX; j++) { + BCM_DBG(" Filter others type[%d]: 0x%02x\n", + j, data->define_filter_others[i].sm_other_type[j]); + BCM_DBG(" Filter others mask[%d]: 0x%02x\n", + j, data->define_filter_others[i].sm_other_mask[j]); + BCM_DBG(" Filter others value[%d]: 0x%02x\n", + j, data->define_filter_others[i].sm_other_value[j]); + } + BCM_DBG(" Filter others active\n"); + for (j = 0; j < data->bcm_cnt_nr; j++) + BCM_DBG(" Event[%d]: %u\n", + j, data->define_filter_others[i].sm_other_active[j]); + } + BCM_DBG("\n"); + + for (i = 0; i < data->define_event_max; i++) { + BCM_DBG("Pre-defined Event index: %u\n", data->define_event[i].index); + BCM_DBG(" Sample ID peak mask: 0x%08x\n", data->define_sample_id[i].peak_mask); + BCM_DBG(" Sample ID peak id: 0x%08x\n", data->define_sample_id[i].peak_id); + BCM_DBG(" Sample ID active\n"); + for (j = 0; j < data->bcm_cnt_nr; j++) + BCM_DBG(" Event[%d]: %u\n", j, data->define_sample_id[i].peak_enable[j]); + } + BCM_DBG("\n"); + + BCM_DBG("Available stop owner:\n"); + for (i = 0; i < STOP_OWNER_MAX; i++) + BCM_DBG(" stop owner[%d]: %s\n", i, + data->available_stop_owner[i] ? 
"true" : "false"); + BCM_DBG("\n"); + + BCM_DBG("Initial BCM run: %u\n", data->initial_bcm_run); + BCM_DBG("Initial monitor period: %u\n", data->initial_period); + BCM_DBG("Initial BCM mode: %u\n", data->initial_bcm_mode); + BCM_DBG("BCM Buffer size: 0x%x\n", data->dump_addr.buff_size); + + BCM_DBG("Initial Run IPs\n"); + for (i = 0; i < data->bcm_ip_nr; i++) + BCM_DBG(" BCM IP[%d]: %u\n", i, data->initial_run_ip[i]); + BCM_DBG("\n"); +} + +#ifdef CONFIG_OF +#ifndef CONFIG_EXYNOS_BCM_DBG_GNR +static int exynos_bcm_ipc_node_parse_dt(struct device_node *np, + struct exynos_bcm_dbg_data *data) +{ + struct device_node *child_np; + + for_each_child_of_node(np, child_np) { + const char *node_name; + + node_name = child_np->name; + BCM_DBG("%s: child node name: %s\n", __func__, node_name); + if (!strcmp(node_name, "ipc_bcm_event")) { + data->ipc_node = child_np; + } else { + BCM_ERR("%s: No device node name: %s\n", __func__, node_name); + return -ENODEV; + } + } + + return 0; +} +#endif +static int exynos_bcm_pd_info_parse_dt(struct device_node *np, + struct exynos_bcm_dbg_data *data) +{ + unsigned int pd_index = 0; + int size; + + size = of_property_count_strings(np, "pd-name"); + if (size < 0) { + BCM_ERR("%s: Failed get number of pd-name\n", __func__); + return size; + } + data->pd_size = size; + + size = of_property_read_string_array(np, "pd-name", list, size); + if (size < 0) { + BCM_ERR("%s: Failed get pd-name\n", __func__); + return size; + } + + for (pd_index = 0; pd_index < size; pd_index++) { + data->pd_info[pd_index] = + kzalloc(sizeof(struct exynos_bcm_pd_info), GFP_KERNEL); + data->pd_info[pd_index]->pd_name = (char *)list[pd_index]; + data->pd_info[pd_index]->pd_index = pd_index; + data->pd_info[pd_index]->cal_pdid = pd_index; + BCM_DBG("%s: get pd-name: %s(%u)\n", __func__, + data->pd_info[pd_index]->pd_name, + data->pd_info[pd_index]->pd_index); + } + + return 0; +} + +static int exynos_bcm_init_run_ip_parse_dt(struct device_node *np, + struct exynos_bcm_dbg_data *data) +{ + int size, ret, i; + unsigned int run_ip; + + ret = of_property_read_u32(np, "bcm_ip_nr", &data->bcm_ip_nr); + if (ret) { + BCM_ERR("%s: Failed get bcm_ip_nr\n", __func__); + return ret; + } + + data->initial_run_ip = + kzalloc(sizeof(unsigned int) * data->bcm_ip_nr, GFP_KERNEL); + if (data->initial_run_ip == NULL) { + BCM_ERR("%s: failed to allocate BCM IPs\n", __func__); + return -ENOMEM; + } + + size = of_property_count_u32_elems(np, "initial_run_bcm_ip"); + if (size < 0) { + BCM_ERR("%s: Failed get number of initial run BCM IPs\n", + __func__); + kfree(data->initial_run_ip); + return size; + } + + if (size > data->bcm_ip_nr) { + BCM_ERR("%s: Invalid BCM IPs size, size(%d):ip_nr(%u)\n", + __func__, size, data->bcm_ip_nr); + kfree(data->initial_run_ip); + return -EINVAL; + } + + for (i = 0; i < size; i++) { + ret = of_property_read_u32_index(np, + "initial_run_bcm_ip", i, &run_ip); + if (ret) { + BCM_ERR("%s: Failed get initial run BCM IP(%d)\n", + __func__, i); + kfree(data->initial_run_ip); + return ret; + } + + data->initial_run_ip[run_ip] = BCM_IP_EN; + } + + return 0; +} + +static int exynos_bcm_define_event_parse_dt(struct device_node *np, + struct exynos_bcm_dbg_data *data) +{ + int i, size, ret; + int event_cnt, nr_event; + const unsigned int *event_addr; + unsigned int event_get_len, event_len; + unsigned int *event_data; + + ret = of_property_read_u32(np, "max_define_event", &data->define_event_max); + if (ret) { + BCM_ERR("%s: Failed get max define event\n", __func__); + return ret; + } + + ret = 
of_property_read_u32(np, "bcm_cnt_nr", &data->bcm_cnt_nr); + if (ret) { + BCM_ERR("%s: Failed get bcm cnt nr\n", __func__); + return ret; + } + + size = of_property_count_u32_elems(np, "define_events"); + if (size < 0) { + BCM_ERR("%s: Failed get number of defined events\n", + __func__); + return size; + } + + event_len = data->bcm_cnt_nr + 1; + + /* + * Element number of define_event is (bcm_cnt_nr + 1). + * calculation number of array + */ + if (size % event_len) { + BCM_ERR("%s: Invalid define event size, size(%d):event_len(%d)\n", + __func__, size, event_len); + return -EINVAL; + } + + nr_event = size / event_len; + + if (nr_event != data->define_event_max) { + BCM_ERR("%s: Invalid define event nr, nr_event(%d):nr_max(%u)\n", + __func__, nr_event, data->define_event_max); + return -EINVAL; + } + + event_addr = of_get_property(np, "define_events", &event_get_len); + if (event_addr == NULL) { + BCM_ERR("%s: Failed get defined events length\n", __func__); + return -ENODEV; + } + + for (i = 0; i < nr_event; i++) { + event_data = (unsigned int *)&event_addr[i * event_len]; + + if ((size - (i * event_len)) <= 0) { + BCM_ERR("%s: Invalid defined event range\n", __func__); + return -EINVAL; + } + + data->define_event[i].index = be32_to_cpu(event_data[0]); + for (event_cnt = 0; event_cnt < data->bcm_cnt_nr; event_cnt++) + data->define_event[i].event[event_cnt] = + be32_to_cpu(event_data[event_cnt + 1]); + } + + ret = of_property_read_u32(np, "default_define_event", + &data->default_define_event); + if (ret) { + BCM_ERR("%s: Failed get default define event\n", __func__); + data->default_define_event = PEAK_LATENCY_FMT_EVT; + BCM_INFO("%s: replaced default define event: %u\n", + __func__, data->default_define_event); + } else { + if (data->default_define_event >= data->define_event_max) { + BCM_ERR("%s: Invalid default define event(%u), max(%u)\n", + __func__, data->default_define_event, + data->define_event_max); + data->default_define_event = PEAK_LATENCY_FMT_EVT; + BCM_INFO("%s: replaced default define event: %u\n", + __func__, data->default_define_event); + } + } + + return 0; +} + +static int exynos_bcm_filter_id_info_parse_dt(struct device_node *np, + struct exynos_bcm_dbg_data *data) +{ + int i, size; + int active_cnt; + const unsigned int *filter_id_addr; + const unsigned int *filter_id_active_addr; + unsigned int filter_id_len, active_len; + unsigned int filter_id_get_len, active_get_len; + unsigned int *id_data; + unsigned int *active_data; + + /* sm_id_mask and sm_id_value */ + size = of_property_count_u32_elems(np, "define_filter_id"); + if (size < 0) { + BCM_ERR("%s: Failed get number of defined filter_id\n", + __func__); + return size; + } + + filter_id_addr = of_get_property(np, "define_filter_id", &filter_id_get_len); + if (filter_id_addr == NULL) { + BCM_ERR("%s: Failed get define filter id length\n", __func__); + return -ENODEV; + } + + filter_id_len = 3; + + for (i = 0; i < PRE_DEFINE_EVT_MAX; i++) { + id_data = (unsigned int *)&filter_id_addr[i * filter_id_len]; + + if ((size - (i * filter_id_len)) <= 0) { + BCM_ERR("%s: Invalid defined filter id range\n", __func__); + return -EINVAL; + } + + data->define_filter_id[i].sm_id_mask = be32_to_cpu(id_data[1]); + data->define_filter_id[i].sm_id_value = be32_to_cpu(id_data[2]); + } + + /* sm_id_active */ + size = of_property_count_u32_elems(np, "define_filter_id_active"); + if (size < 0) { + BCM_ERR("%s: Failed get number of defined filter_id_active\n", + __func__); + return size; + } + + filter_id_active_addr = of_get_property(np, 
"define_filter_id_active", + &active_get_len); + if (filter_id_active_addr == NULL) { + BCM_ERR("%s: Failed get define filter id active length\n", __func__); + return -ENODEV; + } + + active_len = data->bcm_cnt_nr + 1; + + for (i = 0; i < PRE_DEFINE_EVT_MAX; i++) { + active_data = (unsigned int *)&filter_id_active_addr[i * active_len]; + + if ((size - (i * active_len)) <= 0) { + BCM_ERR("%s: Invalid defined filter id active range\n", __func__); + return -EINVAL; + } + + for (active_cnt = 0; active_cnt < data->bcm_cnt_nr; active_cnt++) + data->define_filter_id[i].sm_id_active[active_cnt] = + be32_to_cpu(active_data[active_cnt + 1]); + } + + return 0; +} + +static int exynos_bcm_filter_others_info_parse_dt(struct device_node *np, + struct exynos_bcm_dbg_data *data) +{ + int i, size; + int active_cnt; + const unsigned int *filter_other_0_addr; + const unsigned int *filter_other_1_addr; + const unsigned int *filter_other_active_addr; + unsigned int filter_other_0_len, filter_other_1_len, active_len; + unsigned int filter_other_0_get_len, filter_other_1_get_len, active_get_len; + unsigned int *other0_data; + unsigned int *other1_data; + unsigned int *active_data; + + /* sm_other_type, sm_other_mask and sm_other_value */ + size = of_property_count_u32_elems(np, "define_filter_other_0"); + if (size < 0) { + BCM_ERR("%s: Failed get number of defined filter_other_0\n", + __func__); + return size; + } + + filter_other_0_addr = of_get_property(np, "define_filter_other_0", + &filter_other_0_get_len); + if (filter_other_0_addr == NULL) { + BCM_ERR("%s: Failed get define filter_other_0 length\n", __func__); + return -ENODEV; + } + + filter_other_0_len = 4; + + for (i = 0; i < PRE_DEFINE_EVT_MAX; i++) { + other0_data = (unsigned int *)&filter_other_0_addr[i * filter_other_0_len]; + + if ((size - (i * filter_other_0_len)) <= 0) { + BCM_ERR("%s: Invalid defined filter_other_0 range\n", __func__); + return -EINVAL; + } + + data->define_filter_others[i].sm_other_type[0] = + be32_to_cpu(other0_data[1]); + data->define_filter_others[i].sm_other_mask[0] = + be32_to_cpu(other0_data[2]); + data->define_filter_others[i].sm_other_value[0] = + be32_to_cpu(other0_data[3]); + } + + size = of_property_count_u32_elems(np, "define_filter_other_1"); + if (size < 0) { + BCM_ERR("%s: Failed get number of defined filter_other_1\n", + __func__); + return size; + } + + filter_other_1_addr = of_get_property(np, "define_filter_other_1", + &filter_other_1_get_len); + if (filter_other_1_addr == NULL) { + BCM_ERR("%s: Failed get define filter_other_1 length\n", __func__); + return -ENODEV; + } + + filter_other_1_len = 4; + + for (i = 0; i < PRE_DEFINE_EVT_MAX; i++) { + other1_data = (unsigned int *)&filter_other_1_addr[i * filter_other_1_len]; + + if ((size - (i * filter_other_1_len)) <= 0) { + BCM_ERR("%s: Invalid defined filter_other_1 range\n", __func__); + return -EINVAL; + } + + data->define_filter_others[i].sm_other_type[1] = + be32_to_cpu(other1_data[1]); + data->define_filter_others[i].sm_other_mask[1] = + be32_to_cpu(other1_data[2]); + data->define_filter_others[i].sm_other_value[1] = + be32_to_cpu(other1_data[3]); + } + + /* sm_other_active */ + size = of_property_count_u32_elems(np, "define_filter_other_active"); + if (size < 0) { + BCM_ERR("%s: Failed get number of defined filter_other_active\n", + __func__); + return size; + } + + filter_other_active_addr = of_get_property(np, "define_filter_other_active", + &active_get_len); + if (filter_other_active_addr == NULL) { + BCM_ERR("%s: Failed get define filter_other_active 
length\n", __func__); + return -ENODEV; + } + + active_len = data->bcm_cnt_nr + 1; + + for (i = 0; i < PRE_DEFINE_EVT_MAX; i++) { + active_data = (unsigned int *)&filter_other_active_addr[i * active_len]; + + if ((size - (i * active_len)) <= 0) { + BCM_ERR("%s: Invalid defined filter_other_active range\n", __func__); + return -EINVAL; + } + + for (active_cnt = 0; active_cnt < data->bcm_cnt_nr; active_cnt++) + data->define_filter_others[i].sm_other_active[active_cnt] = + be32_to_cpu(active_data[active_cnt + 1]); + } + + return 0; +} + +static int exynos_bcm_sample_id_info_parse_dt(struct device_node *np, + struct exynos_bcm_dbg_data *data) +{ + int i, size; + int active_cnt; + const unsigned int *sample_id_addr; + const unsigned int *sample_active_addr; + unsigned int sample_id_len, active_len; + unsigned int sample_id_get_len, active_get_len; + unsigned int *id_data; + unsigned int *active_data; + + /* peak_mask and peak_id */ + size = of_property_count_u32_elems(np, "define_sample_id"); + if (size < 0) { + BCM_ERR("%s: Failed get number of defined sample_id\n", + __func__); + return size; + } + + sample_id_addr = of_get_property(np, "define_sample_id", &sample_id_get_len); + if (sample_id_addr == NULL) { + BCM_ERR("%s: Failed get define_sample_id length\n", __func__); + return -ENODEV; + } + + sample_id_len = 3; + + for (i = 0; i < PRE_DEFINE_EVT_MAX; i++) { + id_data = (unsigned int *)&sample_id_addr[i * sample_id_len]; + + if ((size - (i * sample_id_len)) <= 0) { + BCM_ERR("%s: Invalid defined sample id range\n", __func__); + return -EINVAL; + } + + data->define_sample_id[i].peak_mask = be32_to_cpu(id_data[1]); + data->define_sample_id[i].peak_id = be32_to_cpu(id_data[2]); + } + + /* peak_enable */ + size = of_property_count_u32_elems(np, "define_sample_id_enable"); + if (size < 0) { + BCM_ERR("%s: Failed get number of defined sample_id_enable\n", + __func__); + return size; + } + + sample_active_addr = of_get_property(np, "define_sample_id_enable", + &active_get_len); + if (sample_active_addr == NULL) { + BCM_ERR("%s: Failed get define sample_id_enable length\n", __func__); + return -ENODEV; + } + + active_len = data->bcm_cnt_nr + 1; + + for (i = 0; i < PRE_DEFINE_EVT_MAX; i++) { + active_data = (unsigned int *)&sample_active_addr[i * active_len]; + + if ((size - (i * active_len)) <= 0) { + BCM_ERR("%s: Invalid defined sample_id_enable range\n", __func__); + return -EINVAL; + } + + for (active_cnt = 0; active_cnt < data->bcm_cnt_nr; active_cnt++) + data->define_sample_id[i].peak_enable[active_cnt] = + be32_to_cpu(active_data[active_cnt + 1]); + } + + return 0; +} + +static int exynos_bcm_init_control_parse_dt(struct device_node *np, + struct exynos_bcm_dbg_data *data) +{ + int ret; + int i, size; + const unsigned int *stop_owner_addr; + unsigned int stop_owner_get_len, owner_index; + unsigned int *stop_owner_data; + + + ret = of_property_read_u32(np, "initial_bcm_run", &data->initial_bcm_run); + if (ret) { + BCM_ERR("%s: Failed get initial BCM run state\n", __func__); + return ret; + } + + ret = of_property_read_u32(np, "initial_period", &data->initial_period); + if (ret) { + BCM_ERR("%s: Failed get initial sampling period\n", __func__); + return ret; + } + + ret = of_property_read_u32(np, "initial_bcm_mode", &data->initial_bcm_mode); + if (ret) { + BCM_ERR("%s: Failed get initial BCM measure mode\n", __func__); + return ret; + } + + ret = of_property_read_u32(np, "buff_size", &data->dump_addr.buff_size); + if (ret) { + BCM_ERR("%s: Failed get buffer size\n", __func__); + return ret; + 
} + + if (data->initial_bcm_mode >= BCM_MODE_MAX) { + BCM_ERR("%s: Invalid initial BCM measure mode(%u), max(%u)\n", + __func__, data->initial_bcm_mode, + BCM_MODE_MAX); + return -EINVAL; + } + + size = of_property_count_u32_elems(np, "available_stop_owner"); + if (size < 0) { + BCM_ERR("%s: Failed get number of available_stop_owner\n", + __func__); + return size; + } + + if (size > STOP_OWNER_MAX) { + BCM_ERR("%s: Invalid stop owner size (%u)\n", __func__, size); + return -EINVAL; + } + + stop_owner_addr = of_get_property(np, "available_stop_owner", &stop_owner_get_len); + if (stop_owner_addr == NULL) { + BCM_ERR("%s: Failed get define stop owner length\n", __func__); + return -ENODEV; + } + + stop_owner_data = (unsigned int *)stop_owner_addr; + + for (i = 0; i < size; i++) { + owner_index = be32_to_cpu(stop_owner_data[i]); + if (owner_index >= STOP_OWNER_MAX) { + BCM_ERR("%s: Invalid stop owner (%d:%u)\n", + __func__, i, owner_index); + return -EINVAL; + } + data->available_stop_owner[owner_index] = true; + } + + return 0; +} + +int exynos_bcm_dbg_parse_dt(struct device_node *np, + struct exynos_bcm_dbg_data *data) +{ + int ret = 0; + + if (!np) + return -ENODEV; +#ifndef CONFIG_EXYNOS_BCM_DBG_GNR + /* get IPC type */ + ret = exynos_bcm_ipc_node_parse_dt(np, data); + if (ret) { + BCM_ERR("%s: Failed parse IPC node\n", __func__); + return ret; + } +#endif + /* get Local Power domain names and set power domain index */ + ret = exynos_bcm_pd_info_parse_dt(np, data); + if (ret) { + BCM_ERR("%s: Failed parse Power domain info\n", __func__); + return ret; + } + + /* Get initial run BCM IPs information */ + ret = exynos_bcm_init_run_ip_parse_dt(np, data); + if (ret) { + BCM_ERR("%s: Failed parse initial run BCM IPs info\n", __func__); + return ret; + } + + /* Get Pre-defined Event information */ + ret = exynos_bcm_define_event_parse_dt(np, data); + if (ret) { + BCM_ERR("%s: Failed parse Pre-defined Event info\n", __func__); + return ret; + } + + /* Get define Filter ID information */ + ret = exynos_bcm_filter_id_info_parse_dt(np, data); + if (ret) { + BCM_ERR("%s: Failed parse Filter ID info\n", __func__); + return ret; + } + + /* Get define Filter Others information */ + ret = exynos_bcm_filter_others_info_parse_dt(np, data); + if (ret) { + BCM_ERR("%s: Failed parse Filter Others info\n", __func__); + return ret; + } + + /* Get define Sample ID information */ + ret = exynos_bcm_sample_id_info_parse_dt(np, data); + if (ret) { + BCM_ERR("%s: Failed parse Sample ID info\n", __func__); + return ret; + } + + /* Get initial Control information */ + ret = exynos_bcm_init_control_parse_dt(np, data); + if (ret) { + BCM_ERR("%s: Failed parse Control info\n", __func__); + return ret; + } + + /* printing BCM data for debug */ + print_bcm_dbg_data(data); + + return ret; +} +#else +int exynos_bcm_dbg_parse_dt(struct device_node *np, + struct exynos_bcm_dbg_data *data) +{ + return -ENODEV; +} +#endif diff --git a/drivers/soc/samsung/exynos-bcm_dbg-dump.c b/drivers/soc/samsung/exynos-bcm_dbg-dump.c new file mode 100644 index 000000000000..734453b88f32 --- /dev/null +++ b/drivers/soc/samsung/exynos-bcm_dbg-dump.c @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+static char file_name[128];
+
+int exynos_bcm_dbg_buffer_dump(struct exynos_bcm_dbg_data *data, bool klog)
+{
+	void __iomem *v_addr = data->dump_addr.v_addr;
+	u32 buff_size = data->dump_addr.buff_size - EXYNOS_BCM_KTIME_SIZE;
+	u32 buff_cnt = 0;
+	u32 dump_entry_size = sizeof(struct exynos_bcm_dump_info);
+	struct exynos_bcm_dump_info *dump_info = NULL;
+	u32 defined_event, ip_index;
+	char *result;
+	ssize_t str_size;
+	u32 tmp_ktime[2];
+	u64 last_ktime;
+	struct file *fp = NULL;
+	mm_segment_t old_fs = get_fs();
+
+	if (!data->dump_addr.p_addr) {
+		BCM_ERR("%s: No memory region for dump\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (in_interrupt()) {
+		BCM_INFO("%s: skip file dump in interrupt context\n", __func__);
+		return 0;
+	}
+
+	str_size = snprintf(file_name, sizeof(file_name), "/data/result_bcm_%llu.csv",
+			cpu_clock(raw_smp_processor_id()));
+
+	result = kzalloc(sizeof(char) * BCM_DUMP_MAX_STR, GFP_KERNEL);
+	if (result == NULL) {
+		BCM_ERR("%s: failed to allocate result memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	tmp_ktime[0] = __raw_readl(v_addr);
+	tmp_ktime[1] = __raw_readl(v_addr + 0x4);
+	last_ktime = (((u64)tmp_ktime[1] << EXYNOS_BCM_32BIT_SHIFT) &
+			EXYNOS_BCM_U64_HIGH_MASK) |
+			((u64)tmp_ktime[0] & EXYNOS_BCM_U64_LOW_MASK);
+
+	dump_info = (struct exynos_bcm_dump_info *)(v_addr + EXYNOS_BCM_KTIME_SIZE);
+
+	set_fs(KERNEL_DS);
+
+	fp = filp_open(file_name, O_WRONLY|O_CREAT|O_APPEND, 0);
+	if (IS_ERR(fp)) {
+		BCM_ERR("%s: name: %s filp_open fail\n", __func__, file_name);
+		set_fs(old_fs);
+		kfree(result);
+		return PTR_ERR(fp);
+	}
+
+	str_size = snprintf(result, PAGE_SIZE, "last kernel time, %llu\n", last_ktime);
+	vfs_write(fp, result, str_size, &fp->f_pos);
+
+	if (data->bcm_cnt_nr == 4) {
+		str_size = snprintf(result, PAGE_SIZE, "seq_no, ip_index, define_event, time, \
+			ccnt, pmcnt0, pmcnt1, pmcnt2, pmcnt3\n");
+	} else if (data->bcm_cnt_nr == 8) {
+		str_size = snprintf(result, PAGE_SIZE, "seq_no, ip_index, define_event, time, \
+			ccnt, pmcnt0, pmcnt1, pmcnt2, pmcnt3, "
+			"pmcnt4, pmcnt5, pmcnt6, pmcnt7\n");
+	}
+	vfs_write(fp, result, str_size, &fp->f_pos);
+
+	if (klog)
+		pr_info("%s", result);
+
+	while ((buff_size - buff_cnt) > dump_entry_size) {
+		defined_event = BCM_CMD_GET(dump_info->dump_header,
+				BCM_EVT_PRE_DEFINE_MASK, BCM_DUMP_PRE_DEFINE_SHIFT);
+		ip_index = BCM_CMD_GET(dump_info->dump_header, BCM_IP_MASK, 0);
+
+		if (data->bcm_cnt_nr == 4) {
+			str_size = snprintf(result, PAGE_SIZE, "%u, %u, %u, %u, %u, %u, %u, %u, %u\n",
+				dump_info->dump_seq_no, ip_index, defined_event,
+				dump_info->dump_time, dump_info->out_data.ccnt,
+				dump_info->out_data.pmcnt[0], dump_info->out_data.pmcnt[1],
+				dump_info->out_data.pmcnt[2], dump_info->out_data.pmcnt[3]);
+		} else if (data->bcm_cnt_nr == 8) {
+			str_size = snprintf(result, PAGE_SIZE, "%u, %u, %u, %u, %u, %u, \
+				%u, %u, %u, %u, %u, %u, %u\n",
+				dump_info->dump_seq_no, ip_index, defined_event,
+				dump_info->dump_time, dump_info->out_data.ccnt,
+				dump_info->out_data.pmcnt[0], dump_info->out_data.pmcnt[1],
+				dump_info->out_data.pmcnt[2], dump_info->out_data.pmcnt[3],
+				dump_info->out_data.pmcnt[4], dump_info->out_data.pmcnt[5],
+				dump_info->out_data.pmcnt[6], dump_info->out_data.pmcnt[7]);
+		}
+		vfs_write(fp, result, str_size, &fp->f_pos);
+
+		if (klog)
+			pr_info("%s", result);
+
+		dump_info++;
+		buff_cnt += dump_entry_size;
+	}
+
+	filp_close(fp, NULL);
+	set_fs(old_fs);
+	kfree(result);
+
+	return 0;
+}
diff --git
a/drivers/soc/samsung/exynos-bcm_dbg.c b/drivers/soc/samsung/exynos-bcm_dbg.c new file mode 100644 index 000000000000..29c872d430c3 --- /dev/null +++ b/drivers/soc/samsung/exynos-bcm_dbg.c @@ -0,0 +1,3413 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_EXYNOS_ADV_TRACER +#include +#endif +#include +#include +#include +#include +#include +#ifdef CONFIG_EXYNOS_ITMON +#include +#endif + +static struct exynos_bcm_dbg_data *bcm_dbg_data; +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR +static void *bcm_addr; +static struct bin_system_func *bin_func; +static struct os_system_func os_func; +typedef struct bin_system_func*(*start_up_func_t)(void **func); +#endif + +#if defined(CONFIG_EXYNOS_ADV_TRACER) || defined(CONFIG_EXYNOS_BCM_DBG_GNR) +static enum exynos_bcm_err_code exynos_bcm_dbg_ipc_err_handle(unsigned int cmd) +{ + enum exynos_bcm_err_code err_code; + + err_code = BCM_CMD_GET(cmd, BCM_ERR_MASK, BCM_ERR_SHIFT); + if (err_code) + BCM_ERR("%s: BCM IPC error return(%u)\n", __func__, err_code); + + return err_code; +} +#endif + +static int exynos_bcm_ip_validate(unsigned int ip_range, unsigned int ip_index, + unsigned int bcm_ip_nr) +{ + if (ip_range >= BCM_RANGE_MAX) { + BCM_ERR("%s: Invalid ip range(%u)\n", __func__, ip_range); + BCM_ERR("%s: BCM_EACH(%d), BCM_ALL(%d)\n", + __func__, BCM_EACH, BCM_ALL); + return -EINVAL; + } + + if (ip_index >= bcm_ip_nr) { + BCM_ERR("%s: Invalid ip index(%u), ip_max_nr(%u)\n", + __func__, ip_index, bcm_ip_nr - 1); + return -EINVAL; + } + + return 0; +} + +static int exynos_bcm_is_running(unsigned int run_state) +{ + if (run_state == BCM_RUN) { + BCM_ERR("%s: do not set when bcm is running(%u)\n", + __func__, run_state); + return -EBUSY; + } + + return 0; +} + +static int __exynos_bcm_dbg_ipc_send_data(enum exynos_bcm_dbg_ipc_type ipc_type, + struct exynos_bcm_dbg_data *data, + unsigned int *cmd) +{ + int i, ret = 0; +#if defined(CONFIG_EXYNOS_ADV_TRACER) + struct adv_tracer_ipc_cmd config; +#elif defined(CONFIG_EXYNOS_BCM_DBG_GNR) + struct cmd_data config; +#endif + enum exynos_bcm_err_code ipc_err; + unsigned int *bcm_cmd; + + if ((ipc_type < IPC_BCM_DBG_EVENT) || + (ipc_type >= IPC_BCM_DBG_MAX)) { + BCM_ERR("%s: Invalid IPC Type: %d\n", __func__, ipc_type); + ret = -EINVAL; + return ret; + } + + bcm_cmd = cmd; +#if defined(CONFIG_EXYNOS_ADV_TRACER) + config.cmd_raw.cmd = BCM_CMD_SET(ipc_type, BCM_CMD_ID_MASK, BCM_CMD_ID_SHIFT); + memcpy(&config.buffer[1], bcm_cmd, sizeof(unsigned int) * CMD_DATA_MAX); + ret = adv_tracer_ipc_send_data_polling(data->ipc_ch_num, &config); +#elif defined(CONFIG_EXYNOS_BCM_DBG_GNR) + config.raw_cmd = BCM_CMD_SET(ipc_type, BCM_CMD_ID_MASK, BCM_CMD_ID_SHIFT); + memcpy(config.cmd, bcm_cmd, sizeof(unsigned int) * CMD_DATA_MAX); + ret = bin_func->send_data(&config); +#endif + if (ret) { + BCM_ERR("%s: Failed to send IPC(%d:%u) data to origin\n", + __func__, ipc_type, data->ipc_ch_num); + return ret; + } + +#if defined(CONFIG_EXYNOS_ADV_TRACER) + for (i = 0; i < data->ipc_size; i++) + BCM_DBG("%s: received data[%d]: 0x%08x\n", + __func__, i, config.buffer[i]); + + memcpy(bcm_cmd, &config.buffer[1], sizeof(unsigned int) * CMD_DATA_MAX); + + 
ipc_err = exynos_bcm_dbg_ipc_err_handle(config.cmd_raw.cmd); +#elif defined(CONFIG_EXYNOS_BCM_DBG_GNR) + BCM_DBG("%s: received data raw: 0x%08x\n", __func__, config.raw_cmd); + for (i = 0; i < CMD_DATA_MAX; i++) + BCM_DBG("%s: received data[%d]: 0x%08x\n", + __func__, i, config.cmd[i]); + + memcpy(bcm_cmd, config.cmd, sizeof(unsigned int) * CMD_DATA_MAX); + + ipc_err = exynos_bcm_dbg_ipc_err_handle(config.raw_cmd); +#endif + if (ipc_err) { + ret = -EBADMSG; + return ret; + } + + return 0; +} + +int exynos_bcm_dbg_ipc_send_data(enum exynos_bcm_dbg_ipc_type ipc_type, + struct exynos_bcm_dbg_data *data, + unsigned int *cmd) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + ret = __exynos_bcm_dbg_ipc_send_data(ipc_type, data, cmd); + spin_unlock_irqrestore(&data->lock, flags); + + return ret; +} +EXPORT_SYMBOL(exynos_bcm_dbg_ipc_send_data); + +#ifdef CONFIG_EXYNOS_ADV_TRACER +static int adv_tracer_bcm_dbg_handler(struct adv_tracer_ipc_cmd *cmd, unsigned int len) +{ + return 0; +} + +static int exynos_bcm_dbg_ipc_channel_request(struct exynos_bcm_dbg_data *data) +{ + int ret = 0; + + ret = adv_tracer_ipc_request_channel(data->ipc_node, + (ipc_callback)adv_tracer_bcm_dbg_handler, + &data->ipc_ch_num, &data->ipc_size); + if (ret) { + BCM_ERR("%s: adv tracer request channel is failed\n", __func__); + return ret; + } + + BCM_INFO("ipc channel info: ch_num(%u), size(%u)\n", + data->ipc_ch_num, data->ipc_size); + + return ret; +} + +static void exynos_bcm_dbg_ipc_channel_release(struct exynos_bcm_dbg_data *data) +{ + adv_tracer_ipc_release_channel(data->ipc_ch_num); +} +#else +static inline +int exynos_bcm_dbg_ipc_channel_request(struct exynos_bcm_dbg_data *data) +{ + return 0; +} + +static inline +void exynos_bcm_dbg_ipc_channel_release(struct exynos_bcm_dbg_data *data) +{ +} +#endif + +static int exynos_bcm_dbg_early_pd_sync(unsigned int cal_pdid, bool on) +{ + unsigned int cmd[4] = {0, }; + unsigned long flags; + struct exynos_bcm_pd_info *bcm_pd_info = NULL; + int i, ret = 0; + + spin_lock_irqsave(&bcm_dbg_data->lock, flags); + + for (i = 0; i < bcm_dbg_data->pd_size; i++) { + if (bcm_dbg_data->pd_info[i]->cal_pdid == cal_pdid) { + bcm_pd_info = bcm_dbg_data->pd_info[i]; + break; + } + } + + if (!bcm_pd_info) { + ret = -EINVAL; + goto out; + } + + if (on ^ bcm_pd_info->on) { + bcm_pd_info->on = on; + /* Generate IPC command for PD sync */ + cmd[0] |= BCM_CMD_SET(bcm_pd_info->pd_index, BCM_PD_INFO_MASK, + BCM_PD_INFO_SHIFT); + cmd[0] |= BCM_CMD_SET((unsigned int)on, BCM_ONE_BIT_MASK, + BCM_PD_ON_SHIFT); + cmd[1] = 0; + cmd[2] = 0; + cmd[3] = 0; + + /* send command for PD sync */ + ret = __exynos_bcm_dbg_ipc_send_data(IPC_BCM_DBG_PD, + bcm_dbg_data, cmd); + if (ret) { + BCM_ERR("%s: Failed send data for pd sync\n", __func__); + goto out; + } + } + +out: + spin_unlock_irqrestore(&bcm_dbg_data->lock, flags); + + return ret; +} +int exynos_bcm_dbg_pd_sync(unsigned int cal_pdid, bool on) +{ + unsigned int cmd[4] = {0, }; + unsigned long flags; + struct exynos_bcm_pd_info *bcm_pd_info = NULL; + int i, ret = 0; + + if (!bcm_dbg_data || !bcm_dbg_data->pd_sync_init) { + BCM_DBG("%s: do not pd_sync_init(%s)\n", __func__, + bcm_dbg_data->pd_sync_init ? 
"true" : "false"); + return 0; + } + + spin_lock_irqsave(&bcm_dbg_data->lock, flags); + + for (i = 0; i < bcm_dbg_data->pd_size; i++) { + if (bcm_dbg_data->pd_info[i]->cal_pdid == cal_pdid) { + bcm_pd_info = bcm_dbg_data->pd_info[i]; + break; + } + } + + if (!bcm_pd_info) { + ret = -EINVAL; + goto out; + } + + if (on ^ bcm_pd_info->on) { + bcm_pd_info->on = on; + /* Generate IPC command for PD sync */ + cmd[0] |= BCM_CMD_SET(bcm_pd_info->pd_index, BCM_PD_INFO_MASK, + BCM_PD_INFO_SHIFT); + cmd[0] |= BCM_CMD_SET((unsigned int)on, BCM_ONE_BIT_MASK, + BCM_PD_ON_SHIFT); + cmd[1] = 0; + cmd[2] = 0; + cmd[3] = 0; + + /* send command for PD sync */ + ret = __exynos_bcm_dbg_ipc_send_data(IPC_BCM_DBG_PD, + bcm_dbg_data, cmd); + if (ret) { + BCM_ERR("%s: Failed send data for pd sync\n", __func__); + goto out; + } + } + +out: + spin_unlock_irqrestore(&bcm_dbg_data->lock, flags); + + return ret; +} +EXPORT_SYMBOL(exynos_bcm_dbg_pd_sync); + +static int exynos_bcm_dbg_pd_sync_init(struct exynos_bcm_dbg_data *data) +{ + struct exynos_pm_domain *exynos_pd; + unsigned int pd_index, pd_size; + int ret = 0; + + if (data->pd_sync_init) { + BCM_ERR("%s: already pd_sync_init(%s)\n", + __func__, data->pd_sync_init ? "true" : "false"); + return -EINVAL; + } + + pd_size = data->pd_size; + for (pd_index = 0; pd_index < pd_size; pd_index++) { + exynos_pd = NULL; + data->pd_info[pd_index]->on = false; + exynos_pd = exynos_pd_lookup_name(data->pd_info[pd_index]->pd_name); + if (exynos_pd) { + mutex_lock(&exynos_pd->access_lock); + exynos_pd->bcm = data->pd_info[pd_index]; + data->pd_info[pd_index]->cal_pdid = exynos_pd->cal_pdid; + if (cal_pd_status(exynos_pd->cal_pdid)) { + ret = exynos_bcm_dbg_pd_sync(data->pd_info[pd_index]->cal_pdid, true); + if (ret) { + mutex_unlock(&exynos_pd->access_lock); + return ret; + } + } + mutex_unlock(&exynos_pd->access_lock); + } else { + ret = exynos_bcm_dbg_early_pd_sync(data->pd_info[pd_index]->cal_pdid, true); + if (ret) + return ret; + } + } + + data->pd_sync_init = true; + + return ret; +} + +static int exynos_bcm_dbg_pd_sync_exit(struct exynos_bcm_dbg_data *data) +{ + struct exynos_pm_domain *exynos_pd; + unsigned int pd_index, pd_size; + int ret = 0; + + if (!data->pd_sync_init) { + BCM_ERR("%s: already pd_sync_exit(%s)\n", + __func__, data->pd_sync_init ? 
"true" : "false"); + return -EINVAL; + } + + pd_size = data->pd_size; + for (pd_index = 0; pd_index < pd_size; pd_index++) { + exynos_pd = exynos_pd_lookup_name(data->pd_info[pd_index]->pd_name); + if (exynos_pd) { + mutex_lock(&exynos_pd->access_lock); + exynos_pd->bcm = NULL; + ret = exynos_bcm_dbg_pd_sync(data->pd_info[pd_index]->cal_pdid, false); + if (ret) { + mutex_unlock(&exynos_pd->access_lock); + return ret; + } + mutex_unlock(&exynos_pd->access_lock); + } else { + ret = exynos_bcm_dbg_pd_sync(data->pd_info[pd_index]->cal_pdid, false); + if (ret) + return ret; + } + } + + data->pd_sync_init = false; + + return ret; +} + +static void exynos_bcm_dbg_set_base_info( + struct exynos_bcm_ipc_base_info *ipc_base_info, + enum exynos_bcm_event_id event_id, + enum exynos_bcm_event_dir direction, + enum exynos_bcm_ip_range ip_range) +{ + ipc_base_info->event_id = event_id; + ipc_base_info->ip_range = ip_range; + ipc_base_info->direction = direction; +} + +static void exynos_bcm_dbg_set_base_cmd(unsigned int *cmd, + struct exynos_bcm_ipc_base_info *ipc_base_info) +{ + cmd[0] = 0; + cmd[0] |= BCM_CMD_SET(ipc_base_info->event_id, BCM_EVT_ID_MASK, + BCM_EVT_ID_SHIFT); + cmd[0] |= BCM_CMD_SET(ipc_base_info->ip_range, BCM_ONE_BIT_MASK, + BCM_IP_RANGE_SHIFT); + cmd[0] |= BCM_CMD_SET(ipc_base_info->direction, BCM_ONE_BIT_MASK, + BCM_EVT_DIR_SHIFT); +} + +static int exynos_bcm_dbg_event_ctrl(struct exynos_bcm_ipc_base_info *ipc_base_info, + struct exynos_bcm_event *bcm_event, + unsigned int bcm_ip_index, + struct exynos_bcm_dbg_data *data) +{ + unsigned int cmd[4] = {0, 0, 0, 0}; + int i, ret = 0; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + + if (!ipc_base_info || !bcm_event) { + BCM_ERR("%s: pointer is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + exynos_bcm_dbg_set_base_cmd(cmd, ipc_base_info); + + if (ipc_base_info->event_id != BCM_EVT_PRE_DEFINE && + ipc_base_info->event_id != BCM_EVT_EVENT) { + BCM_ERR("%s: Invalid Event ID(%d)\n", __func__, + ipc_base_info->event_id); + ret = -EINVAL; + goto out; + } + + if (ipc_base_info->ip_range == BCM_EACH) + cmd[0] |= BCM_CMD_SET(bcm_ip_index, BCM_IP_MASK, BCM_IP_SHIFT); + + if (ipc_base_info->direction == BCM_EVT_SET) { + /* + * check bcm running state + * When bcm is running, value can not set + */ + ret = exynos_bcm_is_running(data->bcm_run_state); + if (ret) + goto out; + + cmd[0] |= BCM_CMD_SET(bcm_event->index, BCM_EVT_PRE_DEFINE_MASK, + BCM_EVT_PRE_DEFINE_SHIFT); + + for (i = 0; i < BCM_EVT_EVENT_MAX / 2; i++) { + cmd[1] |= BCM_CMD_SET(bcm_event->event[i], BCM_EVT_EVENT_MASK, + BCM_EVT_EVENT_SHIFT(i)); + if (data->bcm_cnt_nr > 4) { + cmd[2] |= BCM_CMD_SET(bcm_event->event[i + 4], BCM_EVT_EVENT_MASK, + BCM_EVT_EVENT_SHIFT(i + 4)); + } + } + } + + /* send command for BCM Event */ + ret = __exynos_bcm_dbg_ipc_send_data(IPC_BCM_DBG_EVENT, data, cmd); + if (ret) { + BCM_ERR("%s: Failed send data\n", __func__); + goto out; + } + + if (ipc_base_info->direction == BCM_EVT_GET) { + bcm_event->index = BCM_CMD_GET(cmd[0], BCM_EVT_PRE_DEFINE_MASK, + BCM_EVT_PRE_DEFINE_SHIFT); + + for (i = 0; i < BCM_EVT_EVENT_MAX / 2; i++) { + bcm_event->event[i] = BCM_CMD_GET(cmd[1], BCM_EVT_EVENT_MASK, + BCM_EVT_EVENT_SHIFT(i)); + if (data->bcm_cnt_nr > 4) { + bcm_event->event[i + 4] = BCM_CMD_GET(cmd[2], BCM_EVT_EVENT_MASK, + BCM_EVT_EVENT_SHIFT(i + 4)); + } + } + } + +out: + spin_unlock_irqrestore(&data->lock, flags); + + return ret; +} + +static int exynos_bcm_dbg_filter_id_ctrl(struct exynos_bcm_ipc_base_info *ipc_base_info, + struct 
exynos_bcm_filter_id *filter_id, + unsigned int bcm_ip_index, + struct exynos_bcm_dbg_data *data) +{ + unsigned int cmd[4] = {0, 0, 0, 0}; + int i, ret = 0; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + + if (!ipc_base_info || !filter_id) { + BCM_ERR("%s: pointer is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + exynos_bcm_dbg_set_base_cmd(cmd, ipc_base_info); + + if (ipc_base_info->event_id != BCM_EVT_EVENT_FLT_ID) { + BCM_ERR("%s: Invalid Event ID(%d)\n", __func__, + ipc_base_info->event_id); + ret = -EINVAL; + goto out; + } + + if (ipc_base_info->ip_range == BCM_EACH) + cmd[0] |= BCM_CMD_SET(bcm_ip_index, BCM_IP_MASK, BCM_IP_SHIFT); + + if (ipc_base_info->direction == BCM_EVT_SET) { + /* + * check bcm running state + * When bcm is running, value can not set + */ + ret = exynos_bcm_is_running(data->bcm_run_state); + if (ret) + goto out; + + cmd[1] = filter_id->sm_id_mask; + cmd[2] = filter_id->sm_id_value; + for (i = 0; i < data->bcm_cnt_nr; i++) + cmd[3] |= BCM_CMD_SET(filter_id->sm_id_active[i], + BCM_ONE_BIT_MASK, BCM_EVT_FLT_ACT_SHIFT(i)); + } + + /* send command for BCM Filter ID */ + ret = __exynos_bcm_dbg_ipc_send_data(IPC_BCM_DBG_EVENT, data, cmd); + if (ret) { + BCM_ERR("%s: Failed send data\n", __func__); + goto out; + } + + if (ipc_base_info->direction == BCM_EVT_GET) { + filter_id->sm_id_mask = cmd[1]; + filter_id->sm_id_value = cmd[2]; + for (i = 0; i < data->bcm_cnt_nr; i++) + filter_id->sm_id_active[i] = BCM_CMD_GET(cmd[3], + BCM_ONE_BIT_MASK, + BCM_EVT_FLT_ACT_SHIFT(i)); + } + +out: + spin_unlock_irqrestore(&data->lock, flags); + + return ret; +} + +static int exynos_bcm_dbg_filter_others_ctrl( + struct exynos_bcm_ipc_base_info *ipc_base_info, + struct exynos_bcm_filter_others *filter_others, + unsigned int bcm_ip_index, + struct exynos_bcm_dbg_data *data) +{ + unsigned int cmd[4] = {0, 0, 0, 0}; + int i, ret = 0; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + + if (!ipc_base_info || !filter_others) { + BCM_ERR("%s: pointer is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + exynos_bcm_dbg_set_base_cmd(cmd, ipc_base_info); + + if (ipc_base_info->event_id != BCM_EVT_EVENT_FLT_OTHERS) { + BCM_ERR("%s: Invalid Event ID(%d)\n", __func__, + ipc_base_info->event_id); + ret = -EINVAL; + goto out; + } + + if (ipc_base_info->ip_range == BCM_EACH) + cmd[0] |= BCM_CMD_SET(bcm_ip_index, BCM_IP_MASK, BCM_IP_SHIFT); + + if (ipc_base_info->direction == BCM_EVT_SET) { + /* + * check bcm running state + * When bcm is running, value can not set + */ + ret = exynos_bcm_is_running(data->bcm_run_state); + if (ret) + goto out; + + for (i = 0; i < BCM_EVT_FLT_OTHR_MAX; i++) { + cmd[1] |= BCM_CMD_SET(filter_others->sm_other_type[i], + BCM_EVT_FLT_OTHR_TYPE_MASK, + BCM_EVT_FLT_OTHR_TYPE_SHIFT(i)); + cmd[1] |= BCM_CMD_SET(filter_others->sm_other_mask[i], + BCM_EVT_FLT_OTHR_MASK_MASK, + BCM_EVT_FLT_OTHR_MASK_SHIFT(i)); + cmd[1] |= BCM_CMD_SET(filter_others->sm_other_value[i], + BCM_EVT_FLT_OTHR_VALUE_MASK, + BCM_EVT_FLT_OTHR_VALUE_SHIFT(i)); + } + + for (i = 0; i < data->bcm_cnt_nr; i++) + cmd[2] |= BCM_CMD_SET(filter_others->sm_other_active[i], + BCM_ONE_BIT_MASK, BCM_EVT_FLT_ACT_SHIFT(i)); + } + + /* send command for BCM Filter Others */ + ret = __exynos_bcm_dbg_ipc_send_data(IPC_BCM_DBG_EVENT, data, cmd); + if (ret) { + BCM_ERR("%s: Failed send data\n", __func__); + goto out; + } + + if (ipc_base_info->direction == BCM_EVT_GET) { + for (i = 0; i < BCM_EVT_FLT_OTHR_MAX; i++) { + filter_others->sm_other_type[i] = + BCM_CMD_GET(cmd[1], 
BCM_EVT_FLT_OTHR_TYPE_MASK, + BCM_EVT_FLT_OTHR_TYPE_SHIFT(i)); + filter_others->sm_other_mask[i] = + BCM_CMD_GET(cmd[1], BCM_EVT_FLT_OTHR_MASK_MASK, + BCM_EVT_FLT_OTHR_MASK_SHIFT(i)); + filter_others->sm_other_value[i] = + BCM_CMD_GET(cmd[1], BCM_EVT_FLT_OTHR_VALUE_MASK, + BCM_EVT_FLT_OTHR_VALUE_SHIFT(i)); + } + + for (i = 0; i < data->bcm_cnt_nr; i++) + filter_others->sm_other_active[i] = + BCM_CMD_GET(cmd[2], BCM_ONE_BIT_MASK, + BCM_EVT_FLT_ACT_SHIFT(i)); + } + +out: + spin_unlock_irqrestore(&data->lock, flags); + + return ret; +} + +static int exynos_bcm_dbg_sample_id_ctrl(struct exynos_bcm_ipc_base_info *ipc_base_info, + struct exynos_bcm_sample_id *sample_id, + unsigned int bcm_ip_index, + struct exynos_bcm_dbg_data *data) +{ + unsigned int cmd[4] = {0, 0, 0, 0}; + int i, ret = 0; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + + if (!ipc_base_info || !sample_id) { + BCM_ERR("%s: pointer is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + exynos_bcm_dbg_set_base_cmd(cmd, ipc_base_info); + + if (ipc_base_info->event_id != BCM_EVT_EVENT_SAMPLE_ID) { + BCM_ERR("%s: Invalid Event ID(%d)\n", __func__, + ipc_base_info->event_id); + ret = -EINVAL; + goto out; + } + + if (ipc_base_info->ip_range == BCM_EACH) + cmd[0] |= BCM_CMD_SET(bcm_ip_index, BCM_IP_MASK, BCM_IP_SHIFT); + + if (ipc_base_info->direction == BCM_EVT_SET) { + /* + * check bcm running state + * When bcm is running, value can not set + */ + ret = exynos_bcm_is_running(data->bcm_run_state); + if (ret) + goto out; + + cmd[1] = sample_id->peak_mask; + cmd[2] = sample_id->peak_id; + for (i = 0; i < data->bcm_cnt_nr; i++) + cmd[3] |= BCM_CMD_SET(sample_id->peak_enable[i], + BCM_ONE_BIT_MASK, BCM_EVT_FLT_ACT_SHIFT(i)); + } + + /* send command for BCM Sample ID */ + ret = __exynos_bcm_dbg_ipc_send_data(IPC_BCM_DBG_EVENT, data, cmd); + if (ret) { + BCM_ERR("%s: Failed send data\n", __func__); + goto out; + } + + if (ipc_base_info->direction == BCM_EVT_GET) { + sample_id->peak_mask = cmd[1]; + sample_id->peak_id = cmd[2]; + for (i = 0; i < data->bcm_cnt_nr; i++) + sample_id->peak_enable[i] = BCM_CMD_GET(cmd[3], + BCM_ONE_BIT_MASK, + BCM_EVT_FLT_ACT_SHIFT(i)); + } + +out: + spin_unlock_irqrestore(&data->lock, flags); + + return ret; +} + +static int exynos_bcm_dbg_run_ctrl(struct exynos_bcm_ipc_base_info *ipc_base_info, + unsigned int *bcm_run, + struct exynos_bcm_dbg_data *data) +{ + unsigned int cmd[4] = {0, 0, 0, 0}; + unsigned int run, low_ktime, high_ktime; + int ret = 0; + u64 ktime; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + + if (!ipc_base_info) { + BCM_ERR("%s: pointer is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + exynos_bcm_dbg_set_base_cmd(cmd, ipc_base_info); + + if (ipc_base_info->event_id != BCM_EVT_RUN_CONT) { + BCM_ERR("%s: Invalid Event ID(%d)\n", __func__, + ipc_base_info->event_id); + ret = -EINVAL; + goto out; + } + + if (ipc_base_info->direction == BCM_EVT_SET) { + run = *bcm_run; + + if (!(run ^ data->bcm_run_state)) { + BCM_INFO("%s: same run control command(%u) bcm_run_state(%u)\n", + __func__, run, data->bcm_run_state); + goto out; + } + + cmd[0] |= BCM_CMD_SET(run, BCM_ONE_BIT_MASK, + BCM_EVT_RUN_CONT_SHIFT); + + if (run == BCM_STOP) { + ktime = sched_clock(); + low_ktime = (unsigned int)(ktime & EXYNOS_BCM_U64_LOW_MASK); + high_ktime = (unsigned int)((ktime & EXYNOS_BCM_U64_HIGH_MASK) + >> EXYNOS_BCM_32BIT_SHIFT); + cmd[1] = low_ktime; + cmd[2] = high_ktime; + +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR + if (data->bcm_mode != BCM_MODE_USERCTRL) + 
hrtimer_try_to_cancel(&data->bcm_hrtimer); +#endif + } + } + + /* send command for BCM Run */ + ret = __exynos_bcm_dbg_ipc_send_data(IPC_BCM_DBG_EVENT, data, cmd); + if (ret) { + BCM_ERR("%s: Failed send data\n", __func__); + goto out; + } + + if (ipc_base_info->direction == BCM_EVT_GET) { + run = BCM_CMD_GET(cmd[0], BCM_ONE_BIT_MASK, + BCM_EVT_RUN_CONT_SHIFT); + *bcm_run = run; + } else if (ipc_base_info->direction == BCM_EVT_SET) { + data->bcm_run_state = run; +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR + if (run == BCM_RUN && data->bcm_mode != BCM_MODE_USERCTRL) + hrtimer_start(&data->bcm_hrtimer, + ms_to_ktime(data->period), HRTIMER_MODE_REL); +#endif + } + + spin_unlock_irqrestore(&data->lock, flags); + + /* dumping data from buffer */ + if (run == BCM_STOP && + ipc_base_info->direction == BCM_EVT_SET) + exynos_bcm_dbg_buffer_dump(data, data->dump_klog); + + return ret; + +out: + spin_unlock_irqrestore(&data->lock, flags); + + return ret; +} + +static int exynos_bcm_dbg_period_ctrl(struct exynos_bcm_ipc_base_info *ipc_base_info, + unsigned int *bcm_period, + struct exynos_bcm_dbg_data *data) +{ + unsigned int cmd[4] = {0, 0, 0, 0}; + unsigned int period; + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + + if (!ipc_base_info) { + BCM_ERR("%s: pointer is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + exynos_bcm_dbg_set_base_cmd(cmd, ipc_base_info); + + if (ipc_base_info->event_id != BCM_EVT_PERIOD_CONT) { + BCM_ERR("%s: Invalid Event ID(%d)\n", __func__, + ipc_base_info->event_id); + ret = -EINVAL; + goto out; + } + + if (ipc_base_info->direction == BCM_EVT_SET) { + /* + * check bcm running state + * When bcm is running, value can not set + */ + ret = exynos_bcm_is_running(data->bcm_run_state); + if (ret) + goto out; + + period = *bcm_period; + + /* valid check for period range */ + if (!(period >= BCM_TIMER_PERIOD_MIN && + period <= BCM_TIMER_PERIOD_MAX)) { + BCM_ERR("%s: Invalid period range(%umsec),(%d ~ %dmsec)\n", + __func__, period, + BCM_TIMER_PERIOD_MIN, BCM_TIMER_PERIOD_MAX); + ret = -EINVAL; + goto out; + } + + cmd[1] |= BCM_CMD_SET(period, BCM_EVT_PERIOD_CONT_MASK, + BCM_EVT_PERIOD_CONT_SHIFT); + } + + /* send command for BCM Period */ + ret = __exynos_bcm_dbg_ipc_send_data(IPC_BCM_DBG_EVENT, data, cmd); + if (ret) { + BCM_ERR("%s: Failed send data\n", __func__); + goto out; + } + + if (ipc_base_info->direction == BCM_EVT_GET) { + period = BCM_CMD_GET(cmd[1], BCM_EVT_PERIOD_CONT_MASK, + BCM_EVT_PERIOD_CONT_SHIFT); + *bcm_period = period; + } + +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR + data->period = period; +#endif +out: + spin_unlock_irqrestore(&data->lock, flags); + + return ret; +} + +static int exynos_bcm_dbg_mode_ctrl(struct exynos_bcm_ipc_base_info *ipc_base_info, + unsigned int *bcm_mode, + struct exynos_bcm_dbg_data *data) +{ + unsigned int cmd[4] = {0, 0, 0, 0}; + unsigned int mode; + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + + if (!ipc_base_info) { + BCM_ERR("%s: pointer is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + exynos_bcm_dbg_set_base_cmd(cmd, ipc_base_info); + + if (ipc_base_info->event_id != BCM_EVT_MODE_CONT) { + BCM_ERR("%s: Invalid Event ID(%d)\n", __func__, + ipc_base_info->event_id); + ret = -EINVAL; + goto out; + } + + if (ipc_base_info->direction == BCM_EVT_SET) { + /* + * check bcm running state + * When bcm is running, value can not set + */ + ret = exynos_bcm_is_running(data->bcm_run_state); + if (ret) + goto out; + + mode = *bcm_mode; + + if (mode >= BCM_MODE_MAX) { + 
BCM_ERR("%s: Invalid BCM mode(%u), BCM mode max(%d)\n", + __func__, mode, BCM_MODE_MAX); + ret = -EINVAL; + goto out; + } + + cmd[0] |= BCM_CMD_SET(mode, BCM_EVT_MODE_CONT_MASK, + BCM_EVT_MODE_CONT_SHIFT); + } + + /* send command for BCM Mode */ + ret = __exynos_bcm_dbg_ipc_send_data(IPC_BCM_DBG_EVENT, data, cmd); + if (ret) { + BCM_ERR("%s: Failed send data\n", __func__); + goto out; + } + + if (ipc_base_info->direction == BCM_EVT_GET) { + mode = BCM_CMD_GET(cmd[0], BCM_EVT_MODE_CONT_MASK, + BCM_EVT_MODE_CONT_SHIFT); + *bcm_mode = mode; + } + +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR + data->bcm_mode = mode; +#endif + +out: + spin_unlock_irqrestore(&data->lock, flags); + + return ret; +} + +static int exynos_bcm_dbg_str_ctrl(struct exynos_bcm_ipc_base_info *ipc_base_info, + unsigned int *ap_suspend, + struct exynos_bcm_dbg_data *data) +{ + unsigned int cmd[4] = {0, 0, 0, 0}; + unsigned int suspend; + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + + if (!ipc_base_info) { + BCM_ERR("%s: pointer is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + exynos_bcm_dbg_set_base_cmd(cmd, ipc_base_info); + + if (ipc_base_info->event_id != BCM_EVT_STR_STATE) { + BCM_ERR("%s: Invalid Event ID(%d)\n", __func__, + ipc_base_info->event_id); + ret = -EINVAL; + goto out; + } + + if (ipc_base_info->direction == BCM_EVT_SET) { + suspend = *ap_suspend; + cmd[0] |= BCM_CMD_SET(suspend, BCM_ONE_BIT_MASK, + BCM_EVT_STR_STATE_SHIFT); + +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR + if (suspend && data->bcm_mode != BCM_MODE_USERCTRL) + hrtimer_try_to_cancel(&data->bcm_hrtimer); +#endif + } + + /* send command for BCM STR state */ + ret = __exynos_bcm_dbg_ipc_send_data(IPC_BCM_DBG_EVENT, data, cmd); + if (ret) { + BCM_ERR("%s: Failed send data\n", __func__); + goto out; + } + + if (ipc_base_info->direction == BCM_EVT_GET) { + suspend = BCM_CMD_GET(cmd[0], BCM_ONE_BIT_MASK, + BCM_EVT_STR_STATE_SHIFT); + *ap_suspend = suspend; + } +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR + if (ipc_base_info->direction == BCM_EVT_SET) { + if (!suspend && data->bcm_mode != BCM_MODE_USERCTRL) + hrtimer_start(&data->bcm_hrtimer, + ms_to_ktime(data->period), HRTIMER_MODE_REL); + } +#endif + +out: + spin_unlock_irqrestore(&data->lock, flags); + + return ret; +} + +static int exynos_bcm_dbg_ip_ctrl(struct exynos_bcm_ipc_base_info *ipc_base_info, + unsigned int *bcm_ip_enable, + unsigned int bcm_ip_index, + struct exynos_bcm_dbg_data *data) +{ + unsigned int cmd[4] = {0, 0, 0, 0}; + unsigned int ip_enable; + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + + if (!ipc_base_info) { + BCM_ERR("%s: pointer is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + exynos_bcm_dbg_set_base_cmd(cmd, ipc_base_info); + + if (ipc_base_info->event_id != BCM_EVT_IP_CONT) { + BCM_ERR("%s: Invalid Event ID(%d)\n", __func__, + ipc_base_info->event_id); + ret = -EINVAL; + goto out; + } + + if (ipc_base_info->ip_range == BCM_EACH) + cmd[0] |= BCM_CMD_SET(bcm_ip_index, BCM_IP_MASK, BCM_IP_SHIFT); + + if (ipc_base_info->direction == BCM_EVT_SET) { + /* + * check bcm running state + * When bcm is running, value can not set + */ + ret = exynos_bcm_is_running(data->bcm_run_state); + if (ret) + goto out; + + ip_enable = *bcm_ip_enable; + cmd[0] |= BCM_CMD_SET(ip_enable, BCM_ONE_BIT_MASK, + BCM_EVT_IP_CONT_SHIFT); + } + + /* send command for BCM IP control */ + ret = __exynos_bcm_dbg_ipc_send_data(IPC_BCM_DBG_EVENT, data, cmd); + if (ret) { + BCM_ERR("%s: Failed send data\n", __func__); + goto out; + } + + if 
(ipc_base_info->direction == BCM_EVT_GET) { + ip_enable = BCM_CMD_GET(cmd[0], BCM_ONE_BIT_MASK, + BCM_EVT_IP_CONT_SHIFT); + *bcm_ip_enable = ip_enable; + } + +out: + spin_unlock_irqrestore(&data->lock, flags); + + return ret; +} + +#ifdef CONFIG_DEBUG_SNAPSHOT +static int exynos_bcm_dbg_dump_addr_ctrl(struct exynos_bcm_ipc_base_info *ipc_base_info, + struct exynos_bcm_dump_addr *dump_addr, + struct exynos_bcm_dbg_data *data) +{ + unsigned int cmd[4] = {0, 0, 0, 0}; + int ret = 0; + unsigned long flags; +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR + u64 v_addr; +#endif + + spin_lock_irqsave(&data->lock, flags); + + if (!ipc_base_info) { + BCM_ERR("%s: pointer is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + exynos_bcm_dbg_set_base_cmd(cmd, ipc_base_info); + + if (ipc_base_info->event_id != BCM_EVT_DUMP_ADDR) { + BCM_ERR("%s: Invalid Event ID(%d)\n", __func__, + ipc_base_info->event_id); + ret = -EINVAL; + goto out; + } + + if (ipc_base_info->direction == BCM_EVT_SET) { + /* + * check bcm running state + * When bcm is running, value can not set + */ + ret = exynos_bcm_is_running(data->bcm_run_state); + if (ret) + goto out; + +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR + v_addr = (u64)dump_addr->v_addr; + if (!v_addr || !dump_addr->buff_size) { + BCM_ERR("%s: No dump address info: v_addr(%llu), buff_size(0x%08x)\n", + __func__, v_addr, dump_addr->buff_size); + ret = -EINVAL; + goto out; + } + + cmd[1] = (unsigned int)(v_addr & EXYNOS_BCM_U64_LOW_MASK); + cmd[2] = (unsigned int)((v_addr & EXYNOS_BCM_U64_HIGH_MASK) + >> EXYNOS_BCM_32BIT_SHIFT); + cmd[3] = (unsigned int)dump_addr->buff_size; +#else + if (!dump_addr->p_addr || !dump_addr->buff_size) { + BCM_ERR("%s: No dump address info: p_addr(0x%08x), buff_size(0x%08x)\n", + __func__, dump_addr->p_addr, dump_addr->buff_size); + ret = -EINVAL; + goto out; + } + + cmd[1] = (unsigned int)dump_addr->p_addr; + cmd[2] = (unsigned int)dump_addr->buff_size; +#endif + } + + /* send command for BCM Dump address info */ + ret = __exynos_bcm_dbg_ipc_send_data(IPC_BCM_DBG_EVENT, data, cmd); + if (ret) { + BCM_ERR("%s: Failed send data\n", __func__); + goto out; + } + +out: + spin_unlock_irqrestore(&data->lock, flags); + + return ret; +} +#endif + +static int exynos_bcm_dbg_early_init(struct exynos_bcm_dbg_data *data) +{ + struct exynos_bcm_ipc_base_info ipc_base_info; + struct exynos_bcm_event bcm_event; + struct exynos_bcm_filter_id filter_id; + struct exynos_bcm_filter_others filter_others; + struct exynos_bcm_sample_id sample_id; + unsigned int default_event; + int ev_cnt, othr_cnt, ip_cnt; + int ret = 0; + + /* pre-defined event set */ + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_PRE_DEFINE, + BCM_EVT_SET, BCM_ALL); + + default_event = data->default_define_event; + bcm_event.index = data->define_event[default_event].index; + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + bcm_event.event[ev_cnt] = + data->define_event[default_event].event[ev_cnt]; + + ret = exynos_bcm_dbg_event_ctrl(&ipc_base_info, &bcm_event, 0, data); + if (ret) { + BCM_ERR("%s: failed set event\n", __func__); + return ret; + } + + /* default filter id set */ + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_EVENT_FLT_ID, + BCM_EVT_SET, BCM_ALL); + + filter_id.sm_id_mask = data->define_filter_id[default_event].sm_id_mask; + filter_id.sm_id_value = data->define_filter_id[default_event].sm_id_value; + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + filter_id.sm_id_active[ev_cnt] = + data->define_filter_id[default_event].sm_id_active[ev_cnt]; + + ret = 
exynos_bcm_dbg_filter_id_ctrl(&ipc_base_info, &filter_id, + 0, data); + if (ret) { + BCM_ERR("%s: failed set filter ID\n", __func__); + return ret; + } + + /* default filter others set */ + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_EVENT_FLT_OTHERS, + BCM_EVT_SET, BCM_ALL); + + for (othr_cnt = 0; othr_cnt < BCM_EVT_FLT_OTHR_MAX; othr_cnt++) { + filter_others.sm_other_type[othr_cnt] = + data->define_filter_others[default_event].sm_other_type[othr_cnt]; + filter_others.sm_other_mask[othr_cnt] = + data->define_filter_others[default_event].sm_other_mask[othr_cnt]; + filter_others.sm_other_value[othr_cnt] = + data->define_filter_others[default_event].sm_other_value[othr_cnt]; + } + + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + filter_others.sm_other_active[ev_cnt] = + data->define_filter_others[default_event].sm_other_active[ev_cnt]; + + ret = exynos_bcm_dbg_filter_others_ctrl(&ipc_base_info, + &filter_others, 0, data); + if (ret) { + BCM_ERR("%s: failed set filter others\n", __func__); + return ret; + } + + /* default sample id set */ + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_EVENT_SAMPLE_ID, + BCM_EVT_SET, BCM_ALL); + + sample_id.peak_mask = data->define_sample_id[default_event].peak_mask; + sample_id.peak_id = data->define_sample_id[default_event].peak_id; + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + sample_id.peak_enable[ev_cnt] = + data->define_sample_id[default_event].peak_enable[ev_cnt]; + + ret = exynos_bcm_dbg_sample_id_ctrl(&ipc_base_info, &sample_id, + 0, data); + if (ret) { + BCM_ERR("%s: failed set sample ID\n", __func__); + return ret; + } + + /* default period set */ + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_PERIOD_CONT, + BCM_EVT_SET, 0); + + ret = exynos_bcm_dbg_period_ctrl(&ipc_base_info, + &data->initial_period, data); + if (ret) { + BCM_ERR("%s: failed set period\n", __func__); + return ret; + } + + /* default mode set */ + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_MODE_CONT, + BCM_EVT_SET, 0); + + ret = exynos_bcm_dbg_mode_ctrl(&ipc_base_info, + &data->initial_bcm_mode, data); + if (ret) { + BCM_ERR("%s: failed set mode\n", __func__); + return ret; + } + + /* default run ip set */ + for (ip_cnt = 0; ip_cnt < data->bcm_ip_nr; ip_cnt++) { + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_IP_CONT, + BCM_EVT_SET, BCM_EACH); + + ret = exynos_bcm_dbg_ip_ctrl(&ipc_base_info, + &data->initial_run_ip[ip_cnt], ip_cnt, data); + if (ret) { + BCM_ERR("%s: failed set IP control\n", __func__); + return ret; + } + } + + return 0; +} + +static int exynos_bcm_dbg_run(unsigned int bcm_run, + struct exynos_bcm_dbg_data *data) +{ + struct exynos_bcm_ipc_base_info ipc_base_info; + int ret; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_RUN_CONT, + BCM_EVT_SET, 0); + + ret = exynos_bcm_dbg_run_ctrl(&ipc_base_info, &bcm_run, data); + if (ret) { + BCM_ERR("%s: failed set Run state\n", __func__); + return ret; + } + + return 0; +} + +void exynos_bcm_dbg_start(void) +{ + int ret; +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR + if (!bcm_dbg_data->bcm_load_bin) { + ret = exynos_bcm_dbg_load_bin(); + if (ret) { + BCM_ERR("%s failed to load BCM bin\n", __func__); + return; + } + } +#endif + ret = exynos_bcm_dbg_run(BCM_RUN, bcm_dbg_data); + if (ret) { + BCM_ERR("%s: failed to bcm start\n", __func__); + return; + } + + BCM_INFO("%s\n", __func__); +} +EXPORT_SYMBOL(exynos_bcm_dbg_start); + +void exynos_bcm_dbg_stop(unsigned int bcm_stop_owner) +{ + int ret; + +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR + if (!bcm_dbg_data->bcm_load_bin) { + 
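/*
+		 * Unlike exynos_bcm_dbg_start(), which loads the BCM plugin
+		 * binary on demand before starting, there is nothing for stop
+		 * to act on when the binary has never been loaded, so just
+		 * report the state and return.
+		 */
+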
BCM_ERR("BCM bin has not been loaded yet!!\n");
+		return;
+	}
+#endif
+	if (bcm_stop_owner >= STOP_OWNER_MAX) {
+		BCM_ERR("Invalid stop owner (%u)\n", bcm_stop_owner);
+		return;
+	}
+
+	if (!bcm_dbg_data->available_stop_owner[bcm_stop_owner]) {
+		BCM_ERR("Stop owner (%u) has no stop permission\n", bcm_stop_owner);
+		return;
+	}
+
+	ret = exynos_bcm_dbg_run(BCM_STOP, bcm_dbg_data);
+	if (ret) {
+		BCM_ERR("%s: failed to stop bcm\n", __func__);
+		return;
+	}
+
+	BCM_INFO("%s\n", __func__);
+}
+EXPORT_SYMBOL(exynos_bcm_dbg_stop);
+
+static int exynos_bcm_dbg_str(unsigned int suspend,
+				struct exynos_bcm_dbg_data *data)
+{
+	struct exynos_bcm_ipc_base_info ipc_base_info;
+	int ret;
+
+	exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_STR_STATE,
+					BCM_EVT_SET, 0);
+
+	ret = exynos_bcm_dbg_str_ctrl(&ipc_base_info, &suspend, data);
+	if (ret) {
+		BCM_ERR("%s: failed to set str state\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* SYSFS Interface */
+static ssize_t show_bcm_dbg_data_pd(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct platform_device *pdev = container_of(dev,
+					struct platform_device, dev);
+	struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev);
+	int i;
+	ssize_t count = 0;
+
+	count += snprintf(buf, PAGE_SIZE, "=== IPC node info ===\n");
+
+	count += snprintf(buf + count, PAGE_SIZE, "IPC node name: %s\n",
+				data->ipc_node->name);
+
+	count += snprintf(buf + count, PAGE_SIZE,
+				"\n=== Local Power Domain info ===\n");
+	count += snprintf(buf + count, PAGE_SIZE,
+				"pd_size: %u, pd_sync_init: %s\n",
+				data->pd_size,
+				data->pd_sync_init ? "true" : "false");
+
+	for (i = 0; i < data->pd_size; i++)
+		count += snprintf(buf + count, PAGE_SIZE,
+				"pd_name: %12s, pd_index: %2u, pd_on: %s, cal_pdid: 0x%08x\n",
+				data->pd_info[i]->pd_name, data->pd_info[i]->pd_index,
+				data->pd_info[i]->on ?
"true" : "false", + data->pd_info[i]->cal_pdid); + + return count; +} + +static ssize_t show_bcm_dbg_data_df_event(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + int i, j; + ssize_t count = 0; + + count += snprintf(buf + count, PAGE_SIZE, + "\n=== Pre-defined Event info ===\n"); + for (i = 0; i < data->define_event_max; i++) { + count += snprintf(buf + count, PAGE_SIZE, + "Pre-defined Event index: %2u\n", + data->define_event[i].index); + for (j = 0; j < data->bcm_cnt_nr; j++) + count += snprintf(buf + count, PAGE_SIZE, + " Event[%d]: 0x%02x\n", j, data->define_event[i].event[j]); + } + + count += snprintf(buf + count, PAGE_SIZE, + "Default Pre-defined Event index: %2u\n", + data->default_define_event); + count += snprintf(buf + count, PAGE_SIZE, + "Pre-defined Event Max: %2u\n", + data->define_event_max); + + return count; +} + +static ssize_t show_bcm_dbg_data_df_filter(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + int i, j; + ssize_t count = 0; + + count += snprintf(buf + count, PAGE_SIZE, "\n=== Filter ID info ===\n"); + for (i = 0; i < data->define_event_max; i++) { + count += snprintf(buf + count, PAGE_SIZE, + "Pre-defined Event index: %2u\n", + data->define_event[i].index); + count += snprintf(buf + count, PAGE_SIZE, " Filter ID mask: 0x%08x\n", + data->define_filter_id[i].sm_id_mask); + count += snprintf(buf + count, PAGE_SIZE, " Filter ID value: 0x%08x\n", + data->define_filter_id[i].sm_id_value); + count += snprintf(buf + count, PAGE_SIZE, " Filter ID active\n"); + + for (j = 0; j < data->bcm_cnt_nr; j++) + count += snprintf(buf + count, PAGE_SIZE, + " Event[%d]: %u\n", j, + data->define_filter_id[i].sm_id_active[j]); + } + + count += snprintf(buf + count, PAGE_SIZE, "\n=== Filter Others info ===\n"); + for (i = 0; i < data->define_event_max; i++) { + count += snprintf(buf + count, PAGE_SIZE, + "Pre-defined Event index: %2u\n", + data->define_event[i].index); + + for (j = 0; j < BCM_EVT_FLT_OTHR_MAX; j++) { + count += snprintf(buf + count, PAGE_SIZE, + " Filter Others type[%d]: 0x%02x\n", + j, data->define_filter_others[i].sm_other_type[j]); + count += snprintf(buf + count, PAGE_SIZE, + " Filter Others mask[%d]: 0x%02x\n", + j, data->define_filter_others[i].sm_other_mask[j]); + count += snprintf(buf + count, PAGE_SIZE, + " Filter Others value[%d]: 0x%02x\n", + j, data->define_filter_others[i].sm_other_value[j]); + } + + count += snprintf(buf + count, PAGE_SIZE, " Filter Others active\n"); + + for (j = 0; j < data->bcm_cnt_nr; j++) + count += snprintf(buf + count, PAGE_SIZE, + " Event[%d]: %u\n", j, + data->define_filter_others[i].sm_other_active[j]); + } + + return count; +} + +static ssize_t show_bcm_dbg_data_df_sample(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + int i, j; + ssize_t count = 0; + + count += snprintf(buf + count, PAGE_SIZE, "\n=== Sample ID info ===\n"); + for (i = 0; i < data->define_event_max; i++) { + count += snprintf(buf + count, PAGE_SIZE, + "Pre-defined Event index: %2u\n", + data->define_event[i].index); + + count += snprintf(buf + count, PAGE_SIZE, " 
Sample ID: peak_mask: 0x%08x\n", + data->define_sample_id[i].peak_mask); + count += snprintf(buf + count, PAGE_SIZE, " Sample ID: peak_id: 0x%08x\n", + data->define_sample_id[i].peak_id); + count += snprintf(buf + count, PAGE_SIZE, " Sample ID active\n"); + + for (j = 0; j < data->bcm_cnt_nr; j++) + count += snprintf(buf + count, PAGE_SIZE, + " Event[%d]: %u\n", j, + data->define_sample_id[i].peak_enable[j]); + } + + return count; +} + +static ssize_t show_bcm_dbg_data_df_attr(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + int i; + ssize_t count = 0; + + count += snprintf(buf + count, PAGE_SIZE, "\n=== Ctrl Attr info ===\n"); + count += snprintf(buf + count, PAGE_SIZE, "Initial BCM run: %s\n", + data->initial_bcm_run ? "true" : "false"); + count += snprintf(buf + count, PAGE_SIZE, + "Initial monitor period: %u msec\n", + data->initial_period); + count += snprintf(buf + count, PAGE_SIZE, "Initial BCM mode: %u\n", + data->initial_bcm_mode); + count += snprintf(buf + count, PAGE_SIZE, "Initial Run IPs\n"); + + for (i = 0; i < data->bcm_ip_nr; i++) + count += snprintf(buf + count, PAGE_SIZE, + " BCM IP[%d]: %s\n", i, + data->initial_run_ip[i] ? "true" : "false"); + + return count; +} + +static ssize_t show_get_event(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + struct exynos_bcm_event bcm_event; + ssize_t count = 0; + int ip_cnt, ev_cnt, ret; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_EVENT, + BCM_EVT_GET, BCM_EACH); + + for (ip_cnt = 0; ip_cnt < data->bcm_ip_nr; ip_cnt++) { + ret = exynos_bcm_dbg_event_ctrl(&ipc_base_info, &bcm_event, + ip_cnt, data); + if (ret) { + count += snprintf(buf + count, PAGE_SIZE, + "failed get event(ip:%d)\n", ip_cnt); + return count; + } + + count += snprintf(buf + count, PAGE_SIZE, + "bcm[%2d]: def(%2u),", + ip_cnt, bcm_event.index); + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + count += snprintf(buf + count, PAGE_SIZE, + " (0x%02x),", bcm_event.event[ev_cnt]); + count += snprintf(buf + count, PAGE_SIZE, "\n"); + } + + return count; +} + +static ssize_t show_event_ctrl_help(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + ssize_t count = 0; + + /* help store_event_ctrl */ + count += snprintf(buf + count, PAGE_SIZE, "\n= event_ctrl set help =\n"); + count += snprintf(buf + count, PAGE_SIZE, "Usage:\n"); + count += snprintf(buf + count, PAGE_SIZE, + "echo [ip_range] [ip_index] [define_index] \ + [ev0] [ev1] [ev2] [ev3] [ev4] [ev5] [ev6] [ev7] > \ + event_ctrl\n"); + count += snprintf(buf + count, PAGE_SIZE, + " ip_range: BCM_EACH(%d), BCM_ALL(%d)\n", + BCM_EACH, BCM_ALL); + count += snprintf(buf + count, PAGE_SIZE, + " ip_index: number of bcm ip (0 ~ %u)\n" + " (if ip_range is all, set to 0)\n", + data->bcm_ip_nr - 1); + count += snprintf(buf + count, PAGE_SIZE, + " define_index: index of pre-defined event (0 ~ %u)\n" + " 0 means no pre-defined event\n", + data->define_event_max - 1); + count += snprintf(buf + count, PAGE_SIZE, + " evX: event value of counter (if define_index is not 0, \ + 
set to 0\n" + " event value should be hexa value\n"); + + return count; +} + +static ssize_t store_event_ctrl(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + struct exynos_bcm_event bcm_event; + unsigned int bcm_ip_index; + unsigned int event_id, ip_range; + unsigned int defined_index; + unsigned int *event; + int ev_cnt, dfd_cnt, ret; + + event = kzalloc(sizeof(int) * data->bcm_cnt_nr, GFP_KERNEL); + if (event == NULL) { + BCM_ERR("%s: faild allocated of event memory\n", __func__); + return -ENOMEM; + } + + if (data->bcm_cnt_nr == 4) { + ret = sscanf(buf, "%u %u %u %x %x %x %x", + &ip_range, &bcm_ip_index, &defined_index, + &event[0], &event[1], &event[2], &event[3]); + } else if (data->bcm_cnt_nr == 8) { + ret = sscanf(buf, "%u %u %u %x %x %x %x %x %x %x %x", + &ip_range, &bcm_ip_index, &defined_index, + &event[0], &event[1], &event[2], &event[3], + &event[4], &event[5], &event[6], &event[7]); + } + /* 3 means is the number of index */ + if (ret != data->bcm_cnt_nr + 3) { + kfree(event); + return -EINVAL; + } + + ret = exynos_bcm_ip_validate(ip_range, bcm_ip_index, data->bcm_ip_nr); + if (ret) { + kfree(event); + return ret; + } + + if (defined_index >= data->define_event_max) { + BCM_ERR("%s: Invalid defined index(%u)," + " defined_max_nr(%u)\n", __func__, + defined_index, data->define_event_max - 1); + kfree(event); + return -EINVAL; + } + + if (ip_range == BCM_ALL) + bcm_ip_index = 0; + + if (defined_index != NO_PRE_DEFINE_EVT) { + event_id = BCM_EVT_PRE_DEFINE; + for (dfd_cnt = 1; dfd_cnt < data->define_event_max; dfd_cnt++) { + if (defined_index == + data->define_event[dfd_cnt].index) + break; + } + + bcm_event.index = data->define_event[dfd_cnt].index; + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + bcm_event.event[ev_cnt] = + data->define_event[dfd_cnt].event[ev_cnt]; + } else { + event_id = BCM_EVT_EVENT; + bcm_event.index = NO_PRE_DEFINE_EVT; + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + bcm_event.event[ev_cnt] = event[ev_cnt]; + } + + exynos_bcm_dbg_set_base_info(&ipc_base_info, event_id, + BCM_EVT_SET, ip_range); + + ret = exynos_bcm_dbg_event_ctrl(&ipc_base_info, &bcm_event, + bcm_ip_index, data); + if (ret) { + BCM_ERR("%s:failed set event\n", __func__); + kfree(event); + return ret; + } + + kfree(event); + return count; +} + +static ssize_t show_get_filter_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + struct exynos_bcm_filter_id filter_id; + ssize_t count = 0; + int ip_cnt, ret; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_EVENT_FLT_ID, + BCM_EVT_GET, BCM_EACH); + + for (ip_cnt = 0; ip_cnt < data->bcm_ip_nr; ip_cnt++) { + ret = exynos_bcm_dbg_filter_id_ctrl(&ipc_base_info, + &filter_id, ip_cnt, data); + if (ret) { + count += snprintf(buf + count, PAGE_SIZE, + "failed get filter id(ip:%d)\n", ip_cnt); + return count; + } + + count += snprintf(buf + count, PAGE_SIZE, + "bcm[%2d]: mask(0x%08x), value(0x%08x)\n", + ip_cnt, filter_id.sm_id_mask, filter_id.sm_id_value); + } + + return count; +} + +static ssize_t show_get_filter_id_active(struct device *dev, + struct device_attribute *attr, 
char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + struct exynos_bcm_filter_id filter_id; + ssize_t count = 0; + int ip_cnt, ev_cnt, ret; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_EVENT_FLT_ID, + BCM_EVT_GET, BCM_EACH); + + for (ip_cnt = 0; ip_cnt < data->bcm_ip_nr; ip_cnt++) { + ret = exynos_bcm_dbg_filter_id_ctrl(&ipc_base_info, + &filter_id, ip_cnt, data); + if (ret) { + count += snprintf(buf + count, PAGE_SIZE, + "failed get filter id(ip:%d)\n", ip_cnt); + return count; + } + + count += snprintf(buf + count, PAGE_SIZE, "bcm[%2d]:", ip_cnt); + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + count += snprintf(buf + count, PAGE_SIZE, + " ev%d %u,", ev_cnt, + filter_id.sm_id_active[ev_cnt]); + count += snprintf(buf + count, PAGE_SIZE, "\n"); + } + + return count; +} + +static ssize_t show_filter_id_ctrl_help(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + ssize_t count = 0; + + /* help store_filter_id_ctrl */ + count += snprintf(buf + count, PAGE_SIZE, "\n= filter_id_ctrl set help =\n"); + count += snprintf(buf + count, PAGE_SIZE, "Usage:\n"); + count += snprintf(buf + count, PAGE_SIZE, + "echo [ip_range] [ip_index] [define_index] [mask] [value] \ + [ev0] [ev1] [ev2] [ev3] [ev4] [ev5] [ev6] [ev7] > \ + filter_id_ctrl\n"); + count += snprintf(buf + count, PAGE_SIZE, + " ip_range: BCM_EACH(%d), BCM_ALL(%d)\n", + BCM_EACH, BCM_ALL); + count += snprintf(buf + count, PAGE_SIZE, + " ip_index: number of bcm ip (0 ~ %u)\n" + " (if ip_range is all, set to 0)\n", + data->bcm_ip_nr - 1); + count += snprintf(buf + count, PAGE_SIZE, + " define_index: index of pre-defined event (0 ~ %u)\n" + " 0 means no pre-defined event\n", + data->define_event_max - 1); + count += snprintf(buf + count, PAGE_SIZE, + " mask: masking for filter id (if define_index is not 0, \ + set to 0)\n" + " mask value should be hexa value\n"); + count += snprintf(buf + count, PAGE_SIZE, + " value: value of filter id (if define_index is not 0, set to 0)\n" + " value should be hexa value\n"); + count += snprintf(buf + count, PAGE_SIZE, + " evX: event counter alloc for filter id \ + (if define_index is not 0, set to 0)\n" + " value should be 0 or 1\n"); + + return count; +} + +static ssize_t store_filter_id_ctrl(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + struct exynos_bcm_filter_id filter_id; + unsigned int bcm_ip_index, ip_range; + unsigned int defined_index; + unsigned int sm_id_mask, sm_id_value; + unsigned int *sm_id_active; + int ev_cnt, ret; + + sm_id_active = kzalloc(sizeof(int) * data->bcm_cnt_nr, GFP_KERNEL); + if (sm_id_active == NULL) { + BCM_ERR("%s: faild allocated of sm_id_active memory\n", __func__); + return -ENOMEM; + } + + if (data->bcm_cnt_nr == 4) { + ret = sscanf(buf, "%u %u %u %x %x %u %u %u %u", + &ip_range, &bcm_ip_index, &defined_index, &sm_id_mask, &sm_id_value, + &sm_id_active[0], &sm_id_active[1], &sm_id_active[2], &sm_id_active[3]); + } else if (data->bcm_cnt_nr == 8) { + ret = sscanf(buf, "%u %u %u %x %x %u %u %u %u %u 
%u %u %u", + &ip_range, &bcm_ip_index, &defined_index, &sm_id_mask, &sm_id_value, + &sm_id_active[0], &sm_id_active[1], + &sm_id_active[2], &sm_id_active[3], + &sm_id_active[4], &sm_id_active[5], + &sm_id_active[6], &sm_id_active[7]); + } + + /* 5 --> the number of index */ + if (ret != data->bcm_cnt_nr + 5) { + kfree(sm_id_active); + return -EINVAL; + } + + ret = exynos_bcm_ip_validate(ip_range, bcm_ip_index, data->bcm_ip_nr); + if (ret) { + kfree(sm_id_active); + return ret; + } + + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) { + if (sm_id_active[ev_cnt]) + sm_id_active[ev_cnt] = true; + } + + if (defined_index >= data->define_event_max) { + BCM_ERR("%s: Invalid defined index(%u)," + " defined_max_nr(%u)\n", __func__, + defined_index, data->define_event_max - 1); + kfree(sm_id_active); + return -EINVAL; + } + + if (ip_range == BCM_ALL) + bcm_ip_index = 0; + + if (defined_index != NO_PRE_DEFINE_EVT) { + filter_id.sm_id_mask = data->define_filter_id[defined_index].sm_id_mask; + filter_id.sm_id_value = data->define_filter_id[defined_index].sm_id_value; + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + filter_id.sm_id_active[ev_cnt] = + data->define_filter_id[defined_index].sm_id_active[ev_cnt]; + } else { + filter_id.sm_id_mask = sm_id_mask; + filter_id.sm_id_value = sm_id_value; + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + filter_id.sm_id_active[ev_cnt] = sm_id_active[ev_cnt]; + } + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_EVENT_FLT_ID, + BCM_EVT_SET, ip_range); + + ret = exynos_bcm_dbg_filter_id_ctrl(&ipc_base_info, &filter_id, + bcm_ip_index, data); + if (ret) { + BCM_ERR("%s:failed set filter ID\n", __func__); + kfree(sm_id_active); + return ret; + } + + kfree(sm_id_active); + return count; +} + +static ssize_t show_get_filter_others(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + struct exynos_bcm_filter_others filter_others; + ssize_t count = 0; + int ip_cnt, othr_cnt, ret; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_EVENT_FLT_OTHERS, + BCM_EVT_GET, BCM_EACH); + + for (ip_cnt = 0; ip_cnt < data->bcm_ip_nr; ip_cnt++) { + ret = exynos_bcm_dbg_filter_others_ctrl(&ipc_base_info, + &filter_others, ip_cnt, data); + if (ret) { + count += snprintf(buf + count, PAGE_SIZE, + "failed get filter others(ip:%d)\n", ip_cnt); + return count; + } + + count += snprintf(buf + count, PAGE_SIZE, "bcm[%2d]:", ip_cnt); + for (othr_cnt = 0; othr_cnt < BCM_EVT_FLT_OTHR_MAX; othr_cnt++) + count += snprintf(buf + count, PAGE_SIZE, + " type%d(0x%02x), mask%d(0x%02x), value%d(0x%02x),", + othr_cnt, filter_others.sm_other_type[othr_cnt], + othr_cnt, filter_others.sm_other_mask[othr_cnt], + othr_cnt, filter_others.sm_other_value[othr_cnt]); + count += snprintf(buf + count, PAGE_SIZE, "\n"); + } + + return count; +} + +static ssize_t show_get_filter_others_active(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + struct exynos_bcm_filter_others filter_others; + ssize_t count = 0; + int ip_cnt, ev_cnt, ret; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_EVENT_FLT_OTHERS, + BCM_EVT_GET, BCM_EACH); + + for (ip_cnt = 0; ip_cnt < 
data->bcm_ip_nr; ip_cnt++) { + ret = exynos_bcm_dbg_filter_others_ctrl(&ipc_base_info, + &filter_others, ip_cnt, data); + if (ret) { + count += snprintf(buf + count, PAGE_SIZE, + "failed get filter others(ip:%d)\n", ip_cnt); + return count; + } + + count += snprintf(buf + count, PAGE_SIZE, "bcm[%2d]:", ip_cnt); + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + count += snprintf(buf + count, PAGE_SIZE, + " ev%d %u,", ev_cnt, + filter_others.sm_other_active[ev_cnt]); + count += snprintf(buf + count, PAGE_SIZE, "\n"); + } + + return count; +} + +static ssize_t show_filter_others_ctrl_help(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + ssize_t count = 0; + int othr_cnt; + + /* help store_filter_others_ctrl */ + count += snprintf(buf + count, PAGE_SIZE, "\n= filter_others_ctrl set help =\n"); + count += snprintf(buf + count, PAGE_SIZE, "Usage:\n"); + count += snprintf(buf + count, PAGE_SIZE, + "echo [ip_range] [ip_index] [define_index] \ + [type0] [mask0] [value0] [type1] [mask1] [value1] \ + [ev0] [ev1] [ev2] [ev3] [ev4] [ev5] [ev6] [ev7] > \ + filter_others_ctrl\n"); + count += snprintf(buf + count, PAGE_SIZE, + " ip_range: BCM_EACH(%d), BCM_ALL(%d)\n", + BCM_EACH, BCM_ALL); + count += snprintf(buf + count, PAGE_SIZE, + " ip_index: number of bcm ip (0 ~ %u)\n" + " (if ip_range is all, set to 0)\n", + data->bcm_ip_nr - 1); + count += snprintf(buf + count, PAGE_SIZE, + " define_index: index of pre-defined event (0 ~ %u)\n" + " 0 means no pre-defined event\n", + data->define_event_max - 1); + for (othr_cnt = 0; othr_cnt < BCM_EVT_FLT_OTHR_MAX; othr_cnt++) { + count += snprintf(buf + count, PAGE_SIZE, + " type%d: type%d for filter others \ + (if define_index is not 0, set to 0)\n" + " type%d value should be hexa value\n", + othr_cnt, othr_cnt, othr_cnt); + count += snprintf(buf + count, PAGE_SIZE, + " mask%d: mask%d for filter others \ + (if define_index is not 0, set to 0)\n" + " mask%d value should be hexa value\n", + othr_cnt, othr_cnt, othr_cnt); + count += snprintf(buf + count, PAGE_SIZE, + " value%d: value%d of filter others \ + (if define_index is not 0, set to 0)\n" + " value%d should be hexa value\n", + othr_cnt, othr_cnt, othr_cnt); + } + count += snprintf(buf + count, PAGE_SIZE, + " evX: event counter alloc for filter others \ + (if define_index is not 0, set to 0)\n" + " value should be 0 or 1\n"); + + return count; +} + +static ssize_t store_filter_others_ctrl(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + struct exynos_bcm_filter_others filter_others; + unsigned int bcm_ip_index, ip_range; + unsigned int defined_index; + unsigned int sm_other_type[BCM_EVT_FLT_OTHR_MAX]; + unsigned int sm_other_mask[BCM_EVT_FLT_OTHR_MAX]; + unsigned int sm_other_value[BCM_EVT_FLT_OTHR_MAX]; + unsigned int *sm_other_active; + int ev_cnt, othr_cnt, ret; + + sm_other_active = kzalloc(sizeof(int) * data->bcm_cnt_nr, GFP_KERNEL); + if (sm_other_active == NULL) { + BCM_ERR("%s: faild allocated of sm_other_active memory\n", __func__); + return -ENOMEM; + } + + if (data->bcm_cnt_nr == 4) { + ret = sscanf(buf, "%u %u %u %x %x %x %x %x %x %u %u %u %u", + &ip_range, &bcm_ip_index, &defined_index, + 
&sm_other_type[0], &sm_other_mask[0], &sm_other_value[0], + &sm_other_type[1], &sm_other_mask[1], &sm_other_value[1], + &sm_other_active[0], &sm_other_active[1], + &sm_other_active[2], &sm_other_active[3]); + } else if (data->bcm_cnt_nr == 8) { + ret = sscanf(buf, "%u %u %u %x %x %x %x %x %x %u %u %u %u %u %u %u %u", + &ip_range, &bcm_ip_index, &defined_index, + &sm_other_type[0], &sm_other_mask[0], &sm_other_value[0], + &sm_other_type[1], &sm_other_mask[1], &sm_other_value[1], + &sm_other_active[0], &sm_other_active[1], + &sm_other_active[2], &sm_other_active[3], + &sm_other_active[4], &sm_other_active[5], + &sm_other_active[6], &sm_other_active[7]); + } + + /* 9 --> the number of index */ + if (ret != data->bcm_cnt_nr + 9) { + kfree(sm_other_active); + return -EINVAL; + } + + ret = exynos_bcm_ip_validate(ip_range, bcm_ip_index, data->bcm_ip_nr); + if (ret) { + kfree(sm_other_active); + return ret; + } + + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) { + if (sm_other_active[ev_cnt]) + sm_other_active[ev_cnt] = true; + } + + if (defined_index >= data->define_event_max) { + BCM_ERR("%s: Invalid defined index(%u)," + " defined_max_nr(%u)\n", __func__, + defined_index, data->define_event_max - 1); + kfree(sm_other_active); + return -EINVAL; + } + + if (ip_range == BCM_ALL) + bcm_ip_index = 0; + + if (defined_index != NO_PRE_DEFINE_EVT) { + for (othr_cnt = 0; othr_cnt < BCM_EVT_FLT_OTHR_MAX; othr_cnt++) { + filter_others.sm_other_type[othr_cnt] = + data->define_filter_others[defined_index].sm_other_type[othr_cnt]; + filter_others.sm_other_mask[othr_cnt] = + data->define_filter_others[defined_index].sm_other_mask[othr_cnt]; + filter_others.sm_other_value[othr_cnt] = + data->define_filter_others[defined_index].sm_other_value[othr_cnt]; + } + + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + filter_others.sm_other_active[ev_cnt] = + data->define_filter_others[defined_index].sm_other_active[ev_cnt]; + } else { + for (othr_cnt = 0; othr_cnt < BCM_EVT_FLT_OTHR_MAX; othr_cnt++) { + filter_others.sm_other_type[othr_cnt] = sm_other_type[othr_cnt]; + filter_others.sm_other_mask[othr_cnt] = sm_other_mask[othr_cnt]; + filter_others.sm_other_value[othr_cnt] = sm_other_value[othr_cnt]; + } + + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + filter_others.sm_other_active[ev_cnt] = sm_other_active[ev_cnt]; + } + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_EVENT_FLT_OTHERS, + BCM_EVT_SET, ip_range); + + ret = exynos_bcm_dbg_filter_others_ctrl(&ipc_base_info, + &filter_others, bcm_ip_index, data); + if (ret) { + BCM_ERR("%s:failed set filter others\n", __func__); + kfree(sm_other_active); + return ret; + } + + kfree(sm_other_active); + return count; +} + +static ssize_t show_get_sample_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + struct exynos_bcm_sample_id sample_id; + ssize_t count = 0; + int ip_cnt, ret; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_EVENT_SAMPLE_ID, + BCM_EVT_GET, BCM_EACH); + + for (ip_cnt = 0; ip_cnt < data->bcm_ip_nr; ip_cnt++) { + ret = exynos_bcm_dbg_sample_id_ctrl(&ipc_base_info, + &sample_id, ip_cnt, data); + if (ret) { + count += snprintf(buf + count, PAGE_SIZE, + "failed get sample id(ip:%d)\n", ip_cnt); + return count; + } + + count += snprintf(buf + count, PAGE_SIZE, + "bcm[%2d]: mask(0x%08x), id(0x%08x)\n", + 
ip_cnt, sample_id.peak_mask, sample_id.peak_id); + } + + return count; +} + +static ssize_t show_get_sample_id_active(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + struct exynos_bcm_sample_id sample_id; + ssize_t count = 0; + int ip_cnt, ev_cnt, ret; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_EVENT_SAMPLE_ID, + BCM_EVT_GET, BCM_EACH); + + for (ip_cnt = 0; ip_cnt < data->bcm_ip_nr; ip_cnt++) { + ret = exynos_bcm_dbg_sample_id_ctrl(&ipc_base_info, + &sample_id, ip_cnt, data); + if (ret) { + count += snprintf(buf + count, PAGE_SIZE, + "failed get sample id(ip:%d)\n", ip_cnt); + return count; + } + + count += snprintf(buf + count, PAGE_SIZE, "bcm[%2d]:", ip_cnt); + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + count += snprintf(buf + count, PAGE_SIZE, + " ev%d %u,", ev_cnt, + sample_id.peak_enable[ev_cnt]); + count += snprintf(buf + count, PAGE_SIZE, "\n"); + } + + return count; +} + +static ssize_t show_sample_id_ctrl_help(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + ssize_t count = 0; + + /* help store_sample_id_ctrl */ + count += snprintf(buf + count, PAGE_SIZE, "\n= sample_id_ctrl set help =\n"); + count += snprintf(buf + count, PAGE_SIZE, "Usage:\n"); + count += snprintf(buf + count, PAGE_SIZE, + "echo [ip_range] [ip_index] [define_index] [mask] [id] \ + [ev0] [ev1] [ev2] [ev3] [ev4] [ev5] [ev6] [ev7] > \ + sample_id_ctrl\n"); + count += snprintf(buf + count, PAGE_SIZE, + " ip_range: BCM_EACH(%d), BCM_ALL(%d)\n", + BCM_EACH, BCM_ALL); + count += snprintf(buf + count, PAGE_SIZE, + " ip_index: number of bcm ip (0 ~ %u)\n" + " (if ip_range is all, set to 0)\n", + data->bcm_ip_nr - 1); + count += snprintf(buf + count, PAGE_SIZE, + " define_index: index of pre-defined event (0 ~ %u)\n" + " 0 means no pre-defined event\n", + data->define_event_max - 1); + count += snprintf(buf + count, PAGE_SIZE, + " mask: masking for sample id \ + (if define_index is not 0, set to 0)\n" + " mask value should be hexa value\n"); + count += snprintf(buf + count, PAGE_SIZE, + " id: id of sample id \ + (if define_index is not 0, set to 0)\n" + " id should be hexa value\n"); + count += snprintf(buf + count, PAGE_SIZE, + " evX: event counter enable for sample id \ + (if define_index is not 0, set to 0)\n" + " value should be 0 or 1\n"); + + return count; +} + +static ssize_t store_sample_id_ctrl(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + struct exynos_bcm_sample_id sample_id; + unsigned int bcm_ip_index, ip_range; + unsigned int defined_index; + unsigned int peak_mask, peak_id; + unsigned int *peak_enable; + int ev_cnt, ret; + + peak_enable = kzalloc(sizeof(int) * data->bcm_cnt_nr, GFP_KERNEL); + if (peak_enable == NULL) { + BCM_ERR("%s: faild allocated of peak_enable memory\n", __func__); + return -ENOMEM; + } + + if (data->bcm_cnt_nr == 4) { + ret = sscanf(buf, "%u %u %u %x %x %u %u %u %u", + &ip_range, &bcm_ip_index, &defined_index, + &peak_mask, &peak_id, 
&peak_enable[0], &peak_enable[1], + &peak_enable[2], &peak_enable[3]); + } else if (data->bcm_cnt_nr == 8) { + ret = sscanf(buf, "%u %u %u %x %x %u %u %u %u %u %u %u %u", + &ip_range, &bcm_ip_index, &defined_index, + &peak_mask, &peak_id, &peak_enable[0], &peak_enable[1], + &peak_enable[2], &peak_enable[3], + &peak_enable[4], &peak_enable[5], + &peak_enable[6], &peak_enable[7]); + } + + /* 5 --> the number of index */ + if (ret != data->bcm_cnt_nr + 5) { + kfree(peak_enable); + return -EINVAL; + } + + ret = exynos_bcm_ip_validate(ip_range, bcm_ip_index, data->bcm_ip_nr); + if (ret) { + kfree(peak_enable); + return ret; + } + + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) { + if (peak_enable[ev_cnt]) + peak_enable[ev_cnt] = true; + } + + if (defined_index >= data->define_event_max) { + BCM_ERR("%s: Invalid defined index(%u)," + " defined_max_nr(%u)\n", __func__, + defined_index, data->define_event_max - 1); + kfree(peak_enable); + return -EINVAL; + } + + if (ip_range == BCM_ALL) + bcm_ip_index = 0; + + if (defined_index != NO_PRE_DEFINE_EVT) { + sample_id.peak_mask = data->define_sample_id[defined_index].peak_mask; + sample_id.peak_id = data->define_sample_id[defined_index].peak_id; + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + sample_id.peak_enable[ev_cnt] = + data->define_sample_id[defined_index].peak_enable[ev_cnt]; + } else { + sample_id.peak_mask = peak_mask; + sample_id.peak_id = peak_id; + for (ev_cnt = 0; ev_cnt < data->bcm_cnt_nr; ev_cnt++) + sample_id.peak_enable[ev_cnt] = peak_enable[ev_cnt]; + } + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_EVENT_SAMPLE_ID, + BCM_EVT_SET, ip_range); + + ret = exynos_bcm_dbg_sample_id_ctrl(&ipc_base_info, &sample_id, + bcm_ip_index, data); + if (ret) { + BCM_ERR("%s:failed set sample ID\n", __func__); + kfree(peak_enable); + return ret; + } + + kfree(peak_enable); + return count; +} + +static ssize_t show_get_run(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + unsigned int bcm_run; + ssize_t count = 0; + int ret; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_RUN_CONT, + BCM_EVT_GET, 0); + + ret = exynos_bcm_dbg_run_ctrl(&ipc_base_info, &bcm_run, data); + if (ret) { + count += snprintf(buf + count, PAGE_SIZE, + "failed get run state\n"); + return count; + } + + count += snprintf(buf + count, PAGE_SIZE, + "run state: raw state(%s), sw state(%s)\n", + bcm_run ? "run" : "stop", + data->bcm_run_state ? 
"run" : "stop"); + + return count; +} + +static ssize_t show_run_ctrl_help(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t count = 0; + + /* help store_run_ctrl */ + count += snprintf(buf + count, PAGE_SIZE, "\n= run_ctrl set help =\n"); + count += snprintf(buf + count, PAGE_SIZE, "Usage:\n"); + count += snprintf(buf + count, PAGE_SIZE, "echo [run_state] > run_ctrl\n"); + count += snprintf(buf + count, PAGE_SIZE, + " run_state: BCM_RUN(%d), BCM_STOP(%d)\n", + BCM_RUN, BCM_STOP); + + return count; +} + +static ssize_t store_run_ctrl(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + unsigned int bcm_run; + int ret; + + ret = kstrtouint(buf, 0, &bcm_run); + if (ret) + return ret; + + if (!(bcm_run == 0 || bcm_run == 1)) { + BCM_ERR("%s: invalid parameter (%u)\n", __func__, bcm_run); + return -EINVAL; + } + + if (bcm_run) + bcm_run = true; + + ret = exynos_bcm_dbg_run(bcm_run, data); + if (ret) { + BCM_ERR("%s:failed set Run state\n", __func__); + return ret; + } + + return count; +} + +static ssize_t show_get_period(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + unsigned int period; + ssize_t count = 0; + int ret; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_PERIOD_CONT, + BCM_EVT_GET, 0); + + ret = exynos_bcm_dbg_period_ctrl(&ipc_base_info, &period, data); + if (ret) { + count += snprintf(buf + count, PAGE_SIZE, + "failed get period\n"); + return count; + } + + count += snprintf(buf + count, PAGE_SIZE, + "monitor period: %u msec\n", period); + + return count; +} + +static ssize_t show_period_ctrl_help(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t count = 0; + + /* help store_period_ctrl */ + count += snprintf(buf + count, PAGE_SIZE, "\n= period_ctrl set help =\n"); + count += snprintf(buf + count, PAGE_SIZE, "Usage:\n"); + count += snprintf(buf + count, PAGE_SIZE, "echo [period] > period_ctrl\n"); + count += snprintf(buf + count, PAGE_SIZE, + " period: monitor period (unit: msec), \ + min(%d msec) ~ max(%d msec)\n", + BCM_TIMER_PERIOD_MIN, BCM_TIMER_PERIOD_MAX); + + return count; +} + +static ssize_t store_period_ctrl(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + unsigned int period; + int ret; + + ret = kstrtouint(buf, 0, &period); + if (ret) + return ret; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_PERIOD_CONT, + BCM_EVT_SET, 0); + + ret = exynos_bcm_dbg_period_ctrl(&ipc_base_info, &period, data); + if (ret) { + BCM_ERR("%s:failed set period\n", __func__); + return ret; + } + + return count; +} + +static ssize_t show_get_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + unsigned int bcm_mode; + ssize_t count = 0; + int ret; + + 
exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_MODE_CONT, + BCM_EVT_GET, 0); + + ret = exynos_bcm_dbg_mode_ctrl(&ipc_base_info, &bcm_mode, data); + if (ret) { + count += snprintf(buf + count, PAGE_SIZE, + "failed get mode\n"); + return count; + } + + count += snprintf(buf + count, PAGE_SIZE, + "mode: %d (%d:Interval, %d:Once, %d:User_ctrl)\n", + bcm_mode, + BCM_MODE_INTERVAL, BCM_MODE_ONCE, BCM_MODE_USERCTRL); + + return count; +} + +static ssize_t show_mode_ctrl_help(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t count = 0; + + /* help store_mode_ctrl */ + count += snprintf(buf + count, PAGE_SIZE, "\n= mode_ctrl set help =\n"); + count += snprintf(buf + count, PAGE_SIZE, "Usage:\n"); + count += snprintf(buf + count, PAGE_SIZE, "echo [mode] > mode_ctrl\n"); + count += snprintf(buf + count, PAGE_SIZE, + " mode: Interval(%d), Once(%d), User_ctrl(%d)\n", + BCM_MODE_INTERVAL, BCM_MODE_ONCE, BCM_MODE_USERCTRL); + + return count; +} + +static ssize_t store_mode_ctrl(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + unsigned int bcm_mode; + int ret; + + ret = kstrtouint(buf, 0, &bcm_mode); + if (ret) + return ret; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_MODE_CONT, + BCM_EVT_SET, 0); + + ret = exynos_bcm_dbg_mode_ctrl(&ipc_base_info, &bcm_mode, data); + if (ret) { + BCM_ERR("%s:failed set mode\n", __func__); + return ret; + } + + return count; +} + +static ssize_t show_get_str(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + unsigned int suspend; + ssize_t count = 0; + int ret; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_STR_STATE, + BCM_EVT_GET, 0); + + ret = exynos_bcm_dbg_str_ctrl(&ipc_base_info, &suspend, data); + if (ret) { + count += snprintf(buf + count, PAGE_SIZE, + "failed get str state\n"); + return count; + } + + count += snprintf(buf + count, PAGE_SIZE, + "str state: %s\n", suspend ? 
"suspend" : "resume"); + + return count; +} + +static ssize_t show_str_ctrl_help(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t count = 0; + + /* help store_str_ctrl */ + count += snprintf(buf + count, PAGE_SIZE, "\n= str_ctrl set help =\n"); + count += snprintf(buf + count, PAGE_SIZE, "Usage:\n"); + count += snprintf(buf + count, PAGE_SIZE, "echo [str_state] > str_ctrl\n"); + count += snprintf(buf + count, PAGE_SIZE, + " str_state: suspend(1), resume(0)\n"); + + return count; +} + +static ssize_t store_str_ctrl(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + unsigned int suspend; + int ret; + + ret = kstrtouint(buf, 0, &suspend); + if (ret) + return ret; + + if (suspend) + suspend = true; + + ret = exynos_bcm_dbg_str(suspend, data); + if (ret) { + BCM_ERR("%s:failed set str state\n", __func__); + return ret; + } + + return count; +} + +static ssize_t show_get_ip(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + unsigned int ip_enable; + ssize_t count = 0; + int ip_cnt, ret; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_IP_CONT, + BCM_EVT_GET, BCM_EACH); + + for (ip_cnt = 0; ip_cnt < data->bcm_ip_nr; ip_cnt++) { + ret = exynos_bcm_dbg_ip_ctrl(&ipc_base_info, + &ip_enable, ip_cnt, data); + if (ret) { + count += snprintf(buf + count, PAGE_SIZE, + "failed get ip_enable state(ip:%d)\n", ip_cnt); + return count; + } + + count += snprintf(buf + count, PAGE_SIZE, "bcm[%2d]: enabled (%s)\n", + ip_cnt, ip_enable ? 
"true" : "false"); + } + + return count; +} + +static ssize_t show_ip_ctrl_help(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + ssize_t count = 0; + + /* help store_ip_ctrl */ + count += snprintf(buf + count, PAGE_SIZE, "\n= ip_ctrl set help =\n"); + count += snprintf(buf + count, PAGE_SIZE, "Usage:\n"); + count += snprintf(buf + count, PAGE_SIZE, + "echo [ip_index] [enable] > ip_ctrl\n"); + count += snprintf(buf + count, PAGE_SIZE, + " ip_index: number of bcm ip (0 ~ %u)\n", + data->bcm_ip_nr - 1); + count += snprintf(buf + count, PAGE_SIZE, + " enable: ip enable state (1:enable, 0:disable)\n"); + + return count; +} + +static ssize_t store_ip_ctrl(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + struct exynos_bcm_ipc_base_info ipc_base_info; + unsigned int bcm_ip_index, ip_enable; + int ret; + + ret = sscanf(buf, "%u %u", &bcm_ip_index, &ip_enable); + if (ret != 2) + return -EINVAL; + + ret = exynos_bcm_ip_validate(BCM_EACH, bcm_ip_index, data->bcm_ip_nr); + if (ret) + return ret; + + if (ip_enable) + ip_enable = true; + + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_IP_CONT, + BCM_EVT_SET, BCM_EACH); + + ret = exynos_bcm_dbg_ip_ctrl(&ipc_base_info, &ip_enable, + bcm_ip_index, data); + if (ret) { + BCM_ERR("%s:failed set IP control\n", __func__); + return ret; + } + + return count; +} + +#ifdef CONFIG_DEBUG_SNAPSHOT +static int exynos_bcm_dbg_set_dump_info(struct exynos_bcm_dbg_data *data) +{ + struct exynos_bcm_ipc_base_info ipc_base_info; + int ret; + + if (data->dump_addr.buff_size == 0 || + data->dump_addr.buff_size > data->dump_addr.p_size) + data->dump_addr.buff_size = data->dump_addr.p_size; + + BCM_INFO("%s: virtual address for reserved memory: v_addr = 0x%p\n", + __func__, data->dump_addr.v_addr); + BCM_INFO("%s: buffer size for reserved memory: buff_size = 0x%x\n", + __func__, data->dump_addr.buff_size); + + /* send physical address info to BCM plugin */ + exynos_bcm_dbg_set_base_info(&ipc_base_info, BCM_EVT_DUMP_ADDR, + BCM_EVT_SET, 0); + + ret = exynos_bcm_dbg_dump_addr_ctrl(&ipc_base_info, &data->dump_addr, data); + if (ret) { + BCM_ERR("%s: failed set dump address info\n", __func__); + return ret; + } + + return 0; +} + +static ssize_t show_dump_addr_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + ssize_t count = 0; + + count += snprintf(buf + count, PAGE_SIZE, "\n= BCM dump address info =\n"); + count += snprintf(buf + count, PAGE_SIZE, + "physical address = 0x%08x\n", data->dump_addr.p_addr); + count += snprintf(buf + count, PAGE_SIZE, + "virtual address = 0x%p\n", data->dump_addr.v_addr); + count += snprintf(buf + count, PAGE_SIZE, + "dump region size = 0x%08x\n", data->dump_addr.p_size); + count += snprintf(buf + count, PAGE_SIZE, + "actual use size = 0x%08x\n", data->dump_addr.buff_size); + + return count; +} + +static ssize_t store_dump_addr_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct 
exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + unsigned int buff_size; + int ret; + + ret = kstrtouint(buf, 16, &buff_size); + if (ret) + return ret; + + data->dump_addr.buff_size = buff_size; + + ret = exynos_bcm_dbg_set_dump_info(data); + if (ret) { + BCM_ERR("%s: failed set dump info\n", __func__); + return ret; + } + + return count; +} +#endif + +static ssize_t show_enable_dump_klog(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + ssize_t count = 0; + + count += snprintf(buf + count, PAGE_SIZE, "\n= BCM dump to kernel log =\n"); + count += snprintf(buf + count, PAGE_SIZE, "%s\n", + data->dump_klog ? "enabled" : "disabled"); + + return count; +} + +static ssize_t store_enable_dump_klog(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + unsigned int enable; + int ret; + + ret = kstrtouint(buf, 0, &enable); + if (ret) + return ret; + + if (enable) + data->dump_klog = true; + else + data->dump_klog = false; + + return count; +} + +static ssize_t show_enable_stop_owner(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + ssize_t count = 0; + int i; + + count += snprintf(buf + count, PAGE_SIZE, "\n= BCM Available stop owner =\n"); + for (i = 0; i < STOP_OWNER_MAX; i++) + count += snprintf(buf + count, PAGE_SIZE, " stop owner[%d]: %s\n", + i, data->available_stop_owner[i] ? "true" : "false"); + + return count; +} + +static ssize_t store_enable_stop_owner(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + unsigned int owner_index, enable; + int ret; + + ret = sscanf(buf, "%u %u", &owner_index, &enable); + if (ret != 2) + return -EINVAL; + + if (owner_index >= STOP_OWNER_MAX) { + BCM_ERR("Invalid stop owner (%u)\n", owner_index); + return -EINVAL; + } + + if (enable) + data->available_stop_owner[owner_index] = true; + else + data->available_stop_owner[owner_index] = false; + + return count; +} + +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR +static ssize_t show_bcm_dbg_load_bin(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev); + ssize_t count = 0; + + count += snprintf(buf + count, PAGE_SIZE, "\n= BCM Load Bin =\n"); + count += snprintf(buf + count, PAGE_SIZE, " bcm load bin: %s\n", + data->bcm_load_bin ? 
"true" : "false"); + + return count; +} + +static ssize_t store_bcm_dbg_load_bin(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int load; + int ret; + + ret = kstrtouint(buf, 0, &load); + if (ret) + return ret; + + /* loading firmware */ + if (load) { + ret = exynos_bcm_dbg_load_bin(); + if (ret) { + BCM_ERR("%s: failed to load BCM bin\n", __func__); + return ret; + } + } + + return count; +} +#endif + +static DEVICE_ATTR(bcm_dbg_data_pd, 0440, show_bcm_dbg_data_pd, NULL); +static DEVICE_ATTR(bcm_dbg_data_df_event, 0440, + show_bcm_dbg_data_df_event, NULL); +static DEVICE_ATTR(bcm_dbg_data_df_filter, 0440, + show_bcm_dbg_data_df_filter, NULL); +static DEVICE_ATTR(bcm_dbg_data_df_sample, 0440, + show_bcm_dbg_data_df_sample, NULL); +static DEVICE_ATTR(bcm_dbg_data_df_attr, 0440, + show_bcm_dbg_data_df_attr, NULL); +static DEVICE_ATTR(get_event, 0440, show_get_event, NULL); +static DEVICE_ATTR(event_ctrl_help, 0440, show_event_ctrl_help, NULL); +static DEVICE_ATTR(event_ctrl, 0640, NULL, store_event_ctrl); +static DEVICE_ATTR(get_filter_id, 0440, show_get_filter_id, NULL); +static DEVICE_ATTR(get_filter_id_active, 0440, + show_get_filter_id_active, NULL); +static DEVICE_ATTR(filter_id_ctrl_help, 0440, + show_filter_id_ctrl_help, NULL); +static DEVICE_ATTR(filter_id_ctrl, 0640, NULL, store_filter_id_ctrl); +static DEVICE_ATTR(get_filter_others, 0440, show_get_filter_others, NULL); +static DEVICE_ATTR(get_filter_others_active, 0440, + show_get_filter_others_active, NULL); +static DEVICE_ATTR(filter_others_ctrl_help, 0440, + show_filter_others_ctrl_help, NULL); +static DEVICE_ATTR(filter_others_ctrl, 0640, NULL, store_filter_others_ctrl); +static DEVICE_ATTR(get_sample_id, 0440, show_get_sample_id, NULL); +static DEVICE_ATTR(get_sample_id_active, 0440, + show_get_sample_id_active, NULL); +static DEVICE_ATTR(sample_id_ctrl_help, 0440, + show_sample_id_ctrl_help, NULL); +static DEVICE_ATTR(sample_id_ctrl, 0640, NULL, store_sample_id_ctrl); +static DEVICE_ATTR(get_run, 0440, show_get_run, NULL); +static DEVICE_ATTR(run_ctrl_help, 0440, show_run_ctrl_help, NULL); +static DEVICE_ATTR(run_ctrl, 0640, NULL, store_run_ctrl); +static DEVICE_ATTR(get_period, 0440, show_get_period, NULL); +static DEVICE_ATTR(period_ctrl_help, 0440, show_period_ctrl_help, NULL); +static DEVICE_ATTR(period_ctrl, 0640, NULL, store_period_ctrl); +static DEVICE_ATTR(get_mode, 0440, show_get_mode, NULL); +static DEVICE_ATTR(mode_ctrl_help, 0440, show_mode_ctrl_help, NULL); +static DEVICE_ATTR(mode_ctrl, 0640, NULL, store_mode_ctrl); +static DEVICE_ATTR(get_str, 0440, show_get_str, NULL); +static DEVICE_ATTR(str_ctrl_help, 0440, show_str_ctrl_help, NULL); +static DEVICE_ATTR(str_ctrl, 0640, NULL, store_str_ctrl); +static DEVICE_ATTR(get_ip, 0440, show_get_ip, NULL); +static DEVICE_ATTR(ip_ctrl_help, 0440, show_ip_ctrl_help, NULL); +static DEVICE_ATTR(ip_ctrl, 0640, NULL, store_ip_ctrl); +#ifdef CONFIG_DEBUG_SNAPSHOT +static DEVICE_ATTR(dump_addr_info, 0640, show_dump_addr_info, store_dump_addr_info); +#endif +static DEVICE_ATTR(enable_dump_klog, 0640, show_enable_dump_klog, store_enable_dump_klog); +static DEVICE_ATTR(enable_stop_owner, 0640, show_enable_stop_owner, store_enable_stop_owner); +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR +static DEVICE_ATTR(bcm_dbg_load_bin, 0640, show_bcm_dbg_load_bin, store_bcm_dbg_load_bin); +#endif + +static struct attribute *exynos_bcm_dbg_sysfs_entries[] = { + &dev_attr_bcm_dbg_data_pd.attr, + &dev_attr_bcm_dbg_data_df_event.attr, + 
&dev_attr_bcm_dbg_data_df_filter.attr, + &dev_attr_bcm_dbg_data_df_sample.attr, + &dev_attr_bcm_dbg_data_df_attr.attr, + &dev_attr_get_event.attr, + &dev_attr_event_ctrl_help.attr, + &dev_attr_event_ctrl.attr, + &dev_attr_get_filter_id.attr, + &dev_attr_get_filter_id_active.attr, + &dev_attr_filter_id_ctrl_help.attr, + &dev_attr_filter_id_ctrl.attr, + &dev_attr_get_filter_others.attr, + &dev_attr_get_filter_others_active.attr, + &dev_attr_filter_others_ctrl_help.attr, + &dev_attr_filter_others_ctrl.attr, + &dev_attr_get_sample_id.attr, + &dev_attr_get_sample_id_active.attr, + &dev_attr_sample_id_ctrl_help.attr, + &dev_attr_sample_id_ctrl.attr, + &dev_attr_get_run.attr, + &dev_attr_run_ctrl_help.attr, + &dev_attr_run_ctrl.attr, + &dev_attr_get_period.attr, + &dev_attr_period_ctrl_help.attr, + &dev_attr_period_ctrl.attr, + &dev_attr_get_mode.attr, + &dev_attr_mode_ctrl_help.attr, + &dev_attr_mode_ctrl.attr, + &dev_attr_get_str.attr, + &dev_attr_str_ctrl_help.attr, + &dev_attr_str_ctrl.attr, + &dev_attr_get_ip.attr, + &dev_attr_ip_ctrl_help.attr, + &dev_attr_ip_ctrl.attr, +#ifdef CONFIG_DEBUG_SNAPSHOT + &dev_attr_dump_addr_info.attr, +#endif + &dev_attr_enable_dump_klog.attr, + &dev_attr_enable_stop_owner.attr, +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR + &dev_attr_bcm_dbg_load_bin.attr, +#endif + NULL, +}; + +static struct attribute_group exynos_bcm_dbg_attr_group = { + .name = "bcm_attr", + .attrs = exynos_bcm_dbg_sysfs_entries, +}; + +#ifdef CONFIG_DEBUG_SNAPSHOT +static int exynos_bcm_dbg_dump_config(struct exynos_bcm_dbg_data *data) +{ + int ret; + + data->dump_addr.p_addr = dbg_snapshot_get_item_paddr(BCM_DSS_NAME); + data->dump_addr.p_size = dbg_snapshot_get_item_size(BCM_DSS_NAME); + data->dump_addr.v_addr = + (void __iomem *)dbg_snapshot_get_item_vaddr(BCM_DSS_NAME); + + ret = exynos_bcm_dbg_set_dump_info(data); + if (ret) { + BCM_ERR("%s: failed set dump info\n", __func__); + return ret; + } + + return 0; +} +#endif + +#ifdef CONFIG_EXYNOS_ITMON +static int exynos_bcm_dbg_itmon_notifier(struct notifier_block *nb, + unsigned long val, void *v) +{ + struct itmon_notifier *itmon_info = (struct itmon_notifier *)v; + + BCM_INFO("%s: itmon error code %u\n", __func__, itmon_info->errcode); + + if (itmon_info->errcode == ERRCODE_ITMON_TIMEOUT) { + BCM_INFO("%s: Note: It can occurred be IPC timeout \ + because can be trying access to timeout block \ + from BCMDBG plugin\n", __func__); + exynos_bcm_dbg_stop(ITMON_HANDLE); + } + + return NOTIFY_OK; +} +#endif + +static int exynos_bcm_dbg_init(struct exynos_bcm_dbg_data *data) +{ + int ret = 0; + + /* parsing dts data for BCM debug */ + ret = exynos_bcm_dbg_parse_dt(data->dev->of_node, data); + if (ret) { + BCM_ERR("%s: failed to parse private data\n", __func__); + goto err_parse_dt; + } + + /* Request IPC channel */ + ret = exynos_bcm_dbg_ipc_channel_request(data); + if (ret) { + BCM_ERR("%s: failed to ipc channel request\n", __func__); + goto err_ipc_channel; + } + + /* Initalize BCM Plugin */ + ret = exynos_bcm_dbg_early_init(data); + if (ret) { + BCM_ERR("%s: failed to early bcm initialize\n", __func__); + goto err_early_init; + } + + /* initial Local Power Domain sync-up */ + data->pd_sync_init = false; + ret = exynos_bcm_dbg_pd_sync_init(data); + if (ret) { + BCM_ERR("%s: failed to pd_sync_init\n", __func__); + goto err_pd_sync_init; + } + +#ifdef CONFIG_DEBUG_SNAPSHOT + ret = exynos_bcm_dbg_dump_config(data); + if (ret) { + BCM_ERR("%s: failed to dump config\n", __func__); + goto err_dump_config; + } +#endif + data->dump_klog = false; + + /* BCM 
plugin run */ + if (data->initial_bcm_run) { + ret = exynos_bcm_dbg_run(data->initial_bcm_run, data); + if (ret) { + BCM_ERR("%s: failed to bcm initial run\n", __func__); + goto err_initial_run; + } + } + + return 0; + +err_initial_run: +#ifdef CONFIG_DEBUG_SNAPSHOT +err_dump_config: +#endif +err_pd_sync_init: +err_early_init: + exynos_bcm_dbg_ipc_channel_release(data); +err_ipc_channel: +err_parse_dt: + return ret; +} + +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR +static enum hrtimer_restart bcm_monitor(struct hrtimer *hrtimer) +{ + unsigned long flags; + u32 period; + enum hrtimer_restart ret = HRTIMER_NORESTART; + + spin_lock_irqsave(&bcm_dbg_data->lock, flags); + period = bin_func->timer_event(); + spin_unlock_irqrestore(&bcm_dbg_data->lock, flags); + + if (bcm_dbg_data->bcm_mode == BCM_MODE_ONCE || + bcm_dbg_data->bcm_run_state == BCM_STOP) + return ret; + + if (period > 0) { + hrtimer_forward_now(hrtimer, ms_to_ktime(period)); + ret = HRTIMER_RESTART; + } + + return ret; +} + +static void __iomem *bcm_ioremap(phys_addr_t phys_addr, size_t size) +{ + void __iomem *ret; + + ret = ioremap(phys_addr, size); + if (!ret) + BCM_ERR("failed to map bcm physical address\n"); + return ret; +} + +struct page_change_data { + pgprot_t set_mask; + pgprot_t clear_mask; +}; + +static int bcm_change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr, + void *data) +{ + struct page_change_data *cdata = data; + pte_t pte = *ptep; + + pte = clear_pte_bit(pte, cdata->clear_mask); + pte = set_pte_bit(pte, cdata->set_mask); + + set_pte(ptep, pte); + return 0; +} + +static int bcm_change_memory_common(unsigned long addr, int numpages, + pgprot_t set_mask, pgprot_t clear_mask) +{ + unsigned long start = addr; + unsigned long size = PAGE_SIZE * numpages; + unsigned long end = start + size; + int ret; + struct page_change_data data; + + if (!PAGE_ALIGNED(addr)) { + start &= PAGE_MASK; + end = start + size; + WARN_ON_ONCE(1); + } + + if (!numpages) + return 0; + + data.set_mask = set_mask; + data.clear_mask = clear_mask; + + ret = apply_to_page_range(&init_mm, start, size, bcm_change_page_range, + &data); + + flush_tlb_kernel_range(start, end); + return ret; +} + +int exynos_bcm_dbg_load_bin(void) +{ + int ret = 0; + struct file *fp = NULL; + long fsize, nread; + u8 *buf = NULL; + char *lib_bcm = NULL; + mm_segment_t old_fs; + + if (bcm_dbg_data->bcm_load_bin) + return 0; + + ret = bcm_change_memory_common((unsigned long)bcm_addr, + BCM_BIN_SIZE, __pgprot(0), __pgprot(PTE_PXN)); + if (ret) { + BCM_ERR("%s: failed to change memory common\n", __func__); + goto err_out; + } + + os_func.print = printk; + os_func.snprint = snprintf; + os_func.ioremap = bcm_ioremap; + os_func.iounmap = iounmap; + os_func.sched_clock = sched_clock; + + old_fs = get_fs(); + set_fs(KERNEL_DS); + fp = filp_open(BCM_BIN_NAME, O_RDONLY, 0); + if (IS_ERR(fp)) { + BCM_ERR("%s: filp_open fail!!\n", __func__); + ret = -EIO; + goto err_fopen; + } + + fsize = BCM_BIN_SIZE; + BCM_INFO("%s: start, file path %s, size %ld Bytes\n", + __func__, BCM_BIN_NAME, fsize); + buf = vmalloc(fsize); + if (!buf) { + BCM_ERR("%s: failed to allocate memory\n", __func__); + ret = -ENOMEM; + goto err_alloc; + } + + nread = vfs_read(fp, (char __user *)buf, fsize, &fp->f_pos); + if (nread != fsize) { + BCM_ERR("%s: failed to read firmware file, %ld Bytes\n", + __func__, nread); + ret = -EIO; + goto err_vfs_read; + } + + lib_bcm = (char *)bcm_addr; + memset((char *)bcm_addr, 0x0, BCM_BIN_SIZE); + + flush_icache_range((unsigned long)lib_bcm, + (unsigned long)lib_bcm + 
BCM_BIN_SIZE);
+	memcpy((void *)lib_bcm, (void *)buf, fsize);
+	flush_cache_all();
+
+	bin_func = ((start_up_func_t)lib_bcm)((void **)&os_func);
+
+	bcm_dbg_data->bcm_load_bin = true;
+
+	hrtimer_init(&bcm_dbg_data->bcm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	bcm_dbg_data->bcm_hrtimer.function = bcm_monitor;
+
+	ret = exynos_bcm_dbg_init(bcm_dbg_data);
+	if (ret) {
+		BCM_ERR("%s: failed BCM init\n", __func__);
+		goto err_init;
+	}
+
+	/*
+	 * Fall through: the temporary buffer and the firmware file are
+	 * released on both the success and the error path.
+	 */
+err_init:
+err_vfs_read:
+	vfree((void *)buf);
+err_alloc:
+	filp_close(fp, NULL);
+err_fopen:
+	set_fs(old_fs);
+err_out:
+	return ret;
+}
+EXPORT_SYMBOL(exynos_bcm_dbg_load_bin);
+#endif
+
+static int exynos_bcm_dbg_pm_suspend(struct device *dev)
+{
+	unsigned int suspend = true;
+	int ret;
+#ifdef CONFIG_EXYNOS_BCM_DBG_GNR
+	if (!bcm_dbg_data->bcm_load_bin)
+		return 0;
+#endif
+	BCM_DBG("%s: ++\n", __func__);
+
+	ret = exynos_bcm_dbg_str(suspend, bcm_dbg_data);
+	if (ret) {
+		BCM_ERR("%s: failed to set STR state\n", __func__);
+		return ret;
+	}
+
+	BCM_DBG("%s: --\n", __func__);
+
+	return 0;
+}
+
+static int exynos_bcm_dbg_pm_resume(struct device *dev)
+{
+	unsigned int suspend = false;
+	int ret;
+#ifdef CONFIG_EXYNOS_BCM_DBG_GNR
+	if (!bcm_dbg_data->bcm_load_bin)
+		return 0;
+#endif
+	BCM_DBG("%s: ++\n", __func__);
+
+	ret = exynos_bcm_dbg_str(suspend, bcm_dbg_data);
+	if (ret) {
+		BCM_ERR("%s: failed to set STR state\n", __func__);
+		return ret;
+	}
+
+	BCM_DBG("%s: --\n", __func__);
+
+	return 0;
+}
+
+static const struct dev_pm_ops exynos_bcm_dbg_pm_ops = {
+	.suspend = exynos_bcm_dbg_pm_suspend,
+	.resume = exynos_bcm_dbg_pm_resume,
+};
+
+static int __init exynos_bcm_dbg_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct exynos_bcm_dbg_data *data;
+
+	data = kzalloc(sizeof(struct exynos_bcm_dbg_data), GFP_KERNEL);
+	if (data == NULL) {
+		BCM_ERR("%s: failed to allocate BCM debug device\n", __func__);
+		ret = -ENOMEM;
+		goto err_data;
+	}
+
+	bcm_dbg_data = data;
+	data->dev = &pdev->dev;
+
+	spin_lock_init(&data->lock);
+
+#ifndef CONFIG_EXYNOS_BCM_DBG_GNR
+	ret = exynos_bcm_dbg_init(data);
+	if (ret) {
+		BCM_ERR("%s: failed BCM init\n", __func__);
+		goto err_init;
+	}
+#endif
+
+	platform_set_drvdata(pdev, data);
+
+	ret = sysfs_create_group(&data->dev->kobj, &exynos_bcm_dbg_attr_group);
+	if (ret)
+		BCM_ERR("%s: failed to create sysfs group for Exynos BCM DBG\n", __func__);
+
+#ifdef CONFIG_EXYNOS_ITMON
+	data->itmon_notifier.notifier_call = exynos_bcm_dbg_itmon_notifier;
+	itmon_notifier_chain_register(&data->itmon_notifier);
+#endif
+
+	BCM_INFO("%s: exynos bcm is initialized\n", __func__);
+
+	return 0;
+
+#ifndef CONFIG_EXYNOS_BCM_DBG_GNR
+err_init:
+#endif
+	kfree(data);
+err_data:
+	return ret;
+}
+
+static int exynos_bcm_dbg_remove(struct platform_device *pdev)
+{
+	struct exynos_bcm_dbg_data *data = platform_get_drvdata(pdev);
+	int ret;
+
+	sysfs_remove_group(&data->dev->kobj, &exynos_bcm_dbg_attr_group);
+	platform_set_drvdata(pdev, NULL);
+	ret = exynos_bcm_dbg_pd_sync_exit(data);
+	if (ret) {
+		BCM_ERR("%s: failed to pd_sync_exit\n", __func__);
+		return ret;
+	}
+
+	exynos_bcm_dbg_ipc_channel_release(data);
+	kfree(data);
+
+	BCM_INFO("%s: exynos bcm is removed\n", __func__);
+
+	return 0;
+}
+
+static int __init bcm_setup(char *str)
+{
+	if (kstrtoul(str, 0, (unsigned long *)&bcm_addr))
+		goto out;
+
+	return 0;
+out:
+	return -EINVAL;
+}
+__setup("reserve-fimc=", bcm_setup);
+
+static struct platform_device_id exynos_bcm_dbg_driver_ids[] = {
+	{ .name = EXYNOS_BCM_DBG_MODULE_NAME, },
+	{},
+};
+MODULE_DEVICE_TABLE(platform, exynos_bcm_dbg_driver_ids);
+
+static const struct of_device_id exynos_bcm_dbg_match[] = {
+	{ .compatible = "samsung,exynos-bcm_dbg", },
+	{},
+};
+
+static struct platform_driver exynos_bcm_dbg_driver = {
+	.remove = exynos_bcm_dbg_remove,
+	.id_table = exynos_bcm_dbg_driver_ids,
+	.driver = {
+		.name = EXYNOS_BCM_DBG_MODULE_NAME,
+		.owner = THIS_MODULE,
+		.pm = &exynos_bcm_dbg_pm_ops,
+		.of_match_table = exynos_bcm_dbg_match,
+	},
+};
+
+module_platform_driver_probe(exynos_bcm_dbg_driver, exynos_bcm_dbg_probe);
+
+MODULE_AUTHOR("Taekki Kim ");
+MODULE_DESCRIPTION("Samsung BCM Debug driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/samsung/exynos-pd.c b/drivers/soc/samsung/exynos-pd.c
index fdc8a6a027dd..fef7fa174754 100644
--- a/drivers/soc/samsung/exynos-pd.c
+++ b/drivers/soc/samsung/exynos-pd.c
@@ -73,9 +73,12 @@ static void exynos_pd_power_on_pre(struct exynos_pm_domain *pd)
 
 static void exynos_pd_power_on_post(struct exynos_pm_domain *pd)
 {
 #if defined(CONFIG_EXYNOS_BCM)
-	if(cal_pd_status(pd->cal_pdid) && pd->bcm)
-		bcm_pd_sync(pd->bcm, true);
+	if (cal_pd_status(pd->cal_pdid) && pd->bcm)
+		bcm_pd_sync(pd->bcm, true);
+#elif defined(CONFIG_EXYNOS_BCM_DBG)
+	if (cal_pd_status(pd->cal_pdid) && pd->bcm)
+		exynos_bcm_dbg_pd_sync(pd->bcm->cal_pdid, true);
 #endif
 }
 
@@ -86,9 +89,12 @@ static void exynos_pd_power_off_pre(struct exynos_pm_domain *pd)
 		exynos_g3d_power_down_noti_apm();
 	}
 #endif /* CONFIG_EXYNOS_CL_DVFS_G3D */
 #if defined(CONFIG_EXYNOS_BCM)
-	if(cal_pd_status(pd->cal_pdid) && pd->bcm)
-		bcm_pd_sync(pd->bcm, false);
+	if (cal_pd_status(pd->cal_pdid) && pd->bcm)
+		bcm_pd_sync(pd->bcm, false);
+#elif defined(CONFIG_EXYNOS_BCM_DBG)
+	if (cal_pd_status(pd->cal_pdid) && pd->bcm)
+		exynos_bcm_dbg_pd_sync(pd->bcm->cal_pdid, false);
 #endif
 
 	if (!strcmp(pd->name, "pd-dispaud"))
diff --git a/include/dt-bindings/soc/samsung/exynos-bcm_dbg.h b/include/dt-bindings/soc/samsung/exynos-bcm_dbg.h
new file mode 100644
index 000000000000..1881d5c70349
--- /dev/null
+++ b/include/dt-bindings/soc/samsung/exynos-bcm_dbg.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Device Tree binding constants for Exynos BCM Debug
+ */
+
+#ifndef _DT_BINDINGS_EXYNOS_BCM_DBG_H
+#define _DT_BINDINGS_EXYNOS_BCM_DBG_H
+
+/* BCM Pre-defined event type */
+#ifdef CONFIG_SOC_EXYNOS9610
+#define NO_PRE_DEFINE_EVT		0
+#define PEAK_LATENCY_FMT_EVT		1
+#define PRE_DEFINE_EVT_MAX		2
+#else
+#define NO_PRE_DEFINE_EVT		0
+#define LATENCY_FMT_EVT			1
+#define MO_FMT_EVT			2
+#define BURST_LENGTH_FMT_EVT		3
+#define REQ_BLOCK_FMT_EVT		4
+#define DATA_BLOCK_FMT_EVT		5
+#define REQ_TYPE_FMT_EVT		6
+#define PRE_DEFINE_EVT_MAX		7
+#endif
+
+#define BCM_STOP			0
+#define BCM_RUN				1
+
+#define BCM_IP_DIS			0
+#define BCM_IP_EN			1
+
+#define BCM_MODE_INTERVAL		0
+#define BCM_MODE_ONCE			1
+#define BCM_MODE_USERCTRL		2
+#define BCM_MODE_MAX			3
+
+#define PANIC_HANDLE			0
+#define CAMERA_DRIVER			1
+#define MODEM_IF			2
+#define ITMON_HANDLE			3
+#define STOP_OWNER_MAX			4
+
+#endif /* _DT_BINDINGS_EXYNOS_BCM_DBG_H */
diff --git a/include/soc/samsung/exynos-bcm_dbg-dt.h b/include/soc/samsung/exynos-bcm_dbg-dt.h
new file mode 100644
index 000000000000..911dd8bf5c34
--- /dev/null
+++ b/include/soc/samsung/exynos-bcm_dbg-dt.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __EXYNOS_BCM_DBG_DT_H_ +#define __EXYNOS_BCM_DBG_DT_H_ + +int exynos_bcm_dbg_parse_dt(struct device_node *np, + struct exynos_bcm_dbg_data *data); +#endif /* __EXYNOS_BCM_DBG_DT_H_ */ diff --git a/include/soc/samsung/exynos-bcm_dbg-dump.h b/include/soc/samsung/exynos-bcm_dbg-dump.h new file mode 100644 index 000000000000..5064738c479c --- /dev/null +++ b/include/soc/samsung/exynos-bcm_dbg-dump.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __EXYNOS_BCM_DBG_DUMP_H_ +#define __EXYNOS_BCM_DBG_DUMP_H_ + +/* BCM DUMP format definition */ +#define BCM_DUMP_PRE_DEFINE_SHIFT (16) +#define BCM_DUMP_MAX_STR (4 * 1024) + +struct exynos_bcm_out_data { + u32 ccnt; + u32 pmcnt[BCM_EVT_EVENT_MAX]; +}; + +struct exynos_bcm_dump_info { + /* + * dump_header: + * [31] = dump validation + * [18:16] = pre-defined event index + * [5:0] = BCM IP index + */ + u32 dump_header; + u32 dump_seq_no; + u32 dump_time; + struct exynos_bcm_out_data out_data; +} __attribute__((packed)); + +#ifdef CONFIG_EXYNOS_BCM_DBG_DUMP +int exynos_bcm_dbg_buffer_dump(struct exynos_bcm_dbg_data *data, bool klog); +#else +#define exynos_bcm_dbg_buffer_dump(a, b) do {} while (0) +#endif + +#endif /* __EXYNOS_BCM_DBG_DUMP_H_ */ diff --git a/include/soc/samsung/exynos-bcm_dbg.h b/include/soc/samsung/exynos-bcm_dbg.h new file mode 100644 index 000000000000..8d3e0cb197ba --- /dev/null +++ b/include/soc/samsung/exynos-bcm_dbg.h @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __EXYNOS_BCM_DBG_H_ +#define __EXYNOS_BCM_DBG_H_ + +#include + +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR +#define BCM_BIN_SIZE (SZ_128K) +#define BCM_BIN_NAME "/system/vendor/firmware/fimc_is_lib.bin" +#endif + +#define ERRCODE_ITMON_TIMEOUT (6) + +#define EXYNOS_BCM_DBG_MODULE_NAME "exynos-bcm_dbg" +#define BCM_DSS_NAME "log_bcm" + +#define EXYNOS_BCM_CMD_LOW_MASK (0x0000FFFF) +#define EXYNOS_BCM_CMD_HIGH_MASK (0xFFFF0000) +#define EXYNOS_BCM_U64_LOW_MASK (0x00000000FFFFFFFF) +#define EXYNOS_BCM_U64_HIGH_MASK (0xFFFFFFFF00000000) +#define EXYNOS_BCM_32BIT_SHIFT (32) +#define EXYNOS_BCM_KTIME_SIZE (0x8) + +#define CMD_DATA_MAX (4) +#define BCM_DEFAULT_TIMER_PERIOD (1) +#define BCM_TIMER_PERIOD_MAX (500) +#define BCM_TIMER_PERIOD_MIN (1) + +/* IPC common definition */ +#define BCM_ONE_BIT_MASK (0x1) +#define BCM_ERR_MASK (0x7) +#define BCM_ERR_SHIFT (13) +#define BCM_CMD_ID_MASK (0xF) +#define BCM_CMD_ID_SHIFT (0) + +/* IPC_AP_BCM_PD definition */ +#define BCM_PD_INFO_MAX (32) +#define BCM_PD_INFO_MASK (0x1F) +#define BCM_PD_INFO_SHIFT (0) +#define BCM_PD_ON_SHIFT (6) + +/* IPC_AP_BCM_EVENT definition */ +#define BCM_EVT_EVENT_MAX (8) +#define BCM_EVT_ID_MASK BCM_PD_INFO_MASK +#define BCM_EVT_ID_SHIFT BCM_PD_INFO_SHIFT +#define BCM_EVT_DIR_SHIFT (5) +#define BCM_EVT_PRE_DEFINE_MASK (0x7) +#define BCM_EVT_PRE_DEFINE_SHIFT (6) +#define BCM_IP_RANGE_SHIFT (9) +#define BCM_IP_MASK (0x3F) +#define BCM_IP_SHIFT (10) +#define BCM_EVT_EVENT_MASK (0xFF) +#define BCM_EVT_EVENT_SHIFT(x) (((x) >= 4) ? (((x) * 8) - 32) : ((x) * 8)) +#define BCM_EVT_FLT_ACT_SHIFT(x) (x) +#define BCM_EVT_FLT_OTHR_MAX (2) +#define BCM_EVT_FLT_OTHR_TYPE_MASK (0x1F) +#define BCM_EVT_FLT_OTHR_MASK_MASK (0x1F) +#define BCM_EVT_FLT_OTHR_VALUE_MASK (0x1F) +#define BCM_EVT_FLT_OTHR_TYPE_SHIFT(x) (((x) == 1) ? 26 : 10) +#define BCM_EVT_FLT_OTHR_MASK_SHIFT(x) (((x) == 1) ? 21 : 5) +#define BCM_EVT_FLT_OTHR_VALUE_SHIFT(x) (((x) == 1) ? 16 : 0) +#define BCM_EVT_RUN_CONT_SHIFT (7) +#define BCM_EVT_PERIOD_CONT_MASK (0xFFFFF) +#define BCM_EVT_PERIOD_CONT_SHIFT (0) +#define BCM_EVT_MODE_CONT_MASK (0x7) +#define BCM_EVT_MODE_CONT_SHIFT (7) +#define BCM_EVT_STR_STATE_SHIFT BCM_EVT_RUN_CONT_SHIFT +#define BCM_EVT_IP_CONT_SHIFT BCM_EVT_RUN_CONT_SHIFT + +#define BCM_CMD_GET(cmd_data, mask, shift) ((cmd_data & (mask << shift)) >> shift) +#define BCM_CMD_CLEAR(mask, shift) (~(mask << shift)) +#define BCM_CMD_SET(data, mask, shift) ((data & mask) << shift) + +#undef BCM_DBGGEN +#ifdef BCM_DBGGEN +#define BCM_DBG(x...) pr_info("bcm_dbg: " x) +#else +#define BCM_DBG(x...) do {} while (0) +#endif + +#define BCM_INFO(x...) pr_info("bcm_info: " x) +#define BCM_ERR(x...) 
pr_err("bcm_err: " x) + +enum exynos_bcm_dbg_ipc_type { + IPC_BCM_DBG_EVENT = 0, + IPC_BCM_DBG_PD, + IPC_BCM_DBG_MAX +}; + +enum exynos_bcm_err_code { + E_OK = 0, + E_INVAL, + E_BUSY, +}; + +enum exynos_bcm_event_dir { + BCM_EVT_GET = 0, + BCM_EVT_SET, +}; + +enum exynos_bcm_event_id { + BCM_EVT_PRE_DEFINE = 0, + BCM_EVT_EVENT, + BCM_EVT_RUN_CONT, + BCM_EVT_IP_CONT, + BCM_EVT_PERIOD_CONT, + BCM_EVT_MODE_CONT, + BCM_EVT_EVENT_FLT_ID, + BCM_EVT_EVENT_FLT_OTHERS, + BCM_EVT_EVENT_SAMPLE_ID, + BCM_EVT_STR_STATE, + BCM_EVT_DUMP_ADDR, + BCM_EVT_MAX, +}; + +enum exynos_bcm_ip_range { + BCM_EACH = 0, + BCM_ALL, + BCM_RANGE_MAX, +}; + +struct exynos_bcm_dump_addr { + u32 p_addr; + u32 p_size; + u32 buff_size; + void __iomem *v_addr; +}; + +struct exynos_bcm_ipc_base_info { + enum exynos_bcm_event_dir direction; + enum exynos_bcm_event_id event_id; + enum exynos_bcm_ip_range ip_range; +}; + +struct exynos_bcm_pd_info { + char *pd_name; + unsigned int cal_pdid; + unsigned int pd_index; + bool on; +}; + +struct exynos_bcm_filter_id { + unsigned int sm_id_mask; + unsigned int sm_id_value; + unsigned int sm_id_active[BCM_EVT_EVENT_MAX]; +}; + +struct exynos_bcm_filter_others { + unsigned int sm_other_type[BCM_EVT_FLT_OTHR_MAX]; + unsigned int sm_other_mask[BCM_EVT_FLT_OTHR_MAX]; + unsigned int sm_other_value[BCM_EVT_FLT_OTHR_MAX]; + unsigned int sm_other_active[BCM_EVT_EVENT_MAX]; +}; + +struct exynos_bcm_sample_id { + unsigned int peak_mask; + unsigned int peak_id; + unsigned int peak_enable[BCM_EVT_EVENT_MAX]; +}; + +struct exynos_bcm_event { + unsigned int index; + unsigned int event[BCM_EVT_EVENT_MAX]; +}; + +struct exynos_bcm_dbg_data { + struct device *dev; + spinlock_t lock; + + struct exynos_bcm_dump_addr dump_addr; + bool dump_klog; + + struct device_node *ipc_node; + unsigned int ipc_ch_num; + unsigned int ipc_size; + struct exynos_bcm_pd_info *pd_info[BCM_PD_INFO_MAX]; + unsigned int pd_size; + bool pd_sync_init; + + struct exynos_bcm_event define_event[PRE_DEFINE_EVT_MAX]; + unsigned int default_define_event; + unsigned int define_event_max; + + struct exynos_bcm_filter_id define_filter_id[PRE_DEFINE_EVT_MAX]; + struct exynos_bcm_filter_others define_filter_others[PRE_DEFINE_EVT_MAX]; + struct exynos_bcm_sample_id define_sample_id[PRE_DEFINE_EVT_MAX]; + + unsigned int bcm_ip_nr; + unsigned int initial_bcm_run; + unsigned int initial_period; + unsigned int initial_bcm_mode; + unsigned int *initial_run_ip; + + unsigned int bcm_run_state; + bool available_stop_owner[STOP_OWNER_MAX]; +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR + bool bcm_load_bin; + struct hrtimer bcm_hrtimer; + unsigned int period; + unsigned int bcm_mode; +#endif + unsigned int bcm_cnt_nr; + struct notifier_block itmon_notifier; +}; + +#ifdef CONFIG_EXYNOS_BCM_DBG_GNR +struct cmd_data { + unsigned int raw_cmd; + unsigned int cmd[CMD_DATA_MAX]; +}; + +struct bin_system_func { + int (*send_data)(struct cmd_data *); + int (*timer_event)(void); +}; + +struct os_system_func { + void *(*ioremap)(u64 phys_addr, size_t size); + void (*iounmap)(volatile void *addr); + int (*snprint)(char *buf, size_t size, const char *fmt, ...); + int (*print)(const char *, ...); + u64 (*sched_clock)(void); +}; +#endif + +#ifdef CONFIG_EXYNOS_BCM_DBG +int exynos_bcm_dbg_ipc_send_data(enum exynos_bcm_dbg_ipc_type ipc_type, + struct exynos_bcm_dbg_data *data, + unsigned int *cmd); +int exynos_bcm_dbg_pd_sync(unsigned int cal_pdid, bool on); +void exynos_bcm_dbg_start(void); +void exynos_bcm_dbg_stop(unsigned int bcm_stop_owner); +#else +#define exynos_bcm_dbg_pd(a, 
b) do {} while (0)
+#define exynos_bcm_dbg_ipc_send_data(a, b, c) do {} while (0)
+#define exynos_bcm_dbg_start() do {} while (0)
+#define exynos_bcm_dbg_stop(a) do {} while (0)
+#endif
+
+#ifdef CONFIG_EXYNOS_BCM_DBG_GNR
+int exynos_bcm_dbg_load_bin(void);
+#else
+#define exynos_bcm_dbg_load_bin() do {} while (0)
+#endif
+
+#endif /* __EXYNOS_BCM_DBG_H_ */
diff --git a/include/soc/samsung/exynos-pd.h b/include/soc/samsung/exynos-pd.h
index 9389e62264c0..42188d8a0447 100644
--- a/include/soc/samsung/exynos-pd.h
+++ b/include/soc/samsung/exynos-pd.h
@@ -29,7 +29,11 @@
 #include 
 #include 
+#if defined(CONFIG_EXYNOS_BCM)
 #include 
+#elif defined(CONFIG_EXYNOS_BCM_DBG)
+#include 
+#endif
 #include 
 #include 
@@ -67,6 +71,8 @@ struct exynos_pm_domain {
 	int idle_ip_index;
 #if defined(CONFIG_EXYNOS_BCM)
 	struct bcm_info *bcm;
+#elif defined(CONFIG_EXYNOS_BCM_DBG)
+	struct exynos_bcm_pd_info *bcm;
 #endif
 	bool power_down_skipped;
 	unsigned int need_smc;
-- 
2.20.1
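
For drivers that want to hook into this facility, the contact surface is the exported run-control API from <soc/samsung/exynos-bcm_dbg.h> plus a stop-owner ID from <dt-bindings/soc/samsung/exynos-bcm_dbg.h>; the chosen owner must also be enabled through the BCM debug node's available_stop_owner device-tree data. The sketch below is illustrative only and not part of this patch: the "foo" camera driver and its use of CAMERA_DRIVER as its owner ID are assumptions. When CONFIG_EXYNOS_BCM_DBG is disabled, the header stubs the call out, so no #ifdef is needed at the call site.

#include <dt-bindings/soc/samsung/exynos-bcm_dbg.h>
#include <soc/samsung/exynos-bcm_dbg.h>

/*
 * Hypothetical client: freeze the BCM counters when a fatal error is
 * detected, so the last profiling samples survive for the post-mortem
 * dump instead of being overwritten by the next sampling period.
 */
static void foo_camera_fatal_error(void)
{
	exynos_bcm_dbg_stop(CAMERA_DRIVER);
}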