#
# This file is called by the Linux build system.
-############## Kasin Added, for platform. ################
-
-ifndef CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH
- ccflags-y += -DCONFIG_MALI_DMA_BUF_MAP_ON_ATTACH=y
-endif
-#ifeq ($(CONFIG_ARCH_MESON),y)
-#ccflags-y += -DCONFIG_MALI450=y
-#ifeq ($(CONFIG_MALI450),m)
-#ccflags-y += -DCONFIG_MALI450=y
-#endif
-#ifeq ($(CONFIG_MALI450),y)
-#ccflags-y += -DCONFIG_MALI450=y
-#endif
-ccflags-y += -DCONFIG_MALI_DT=y
-ccflags-y += -DMESON_CPU_TYPE=0x80
-ccflags-y += -DMESON_CPU_TYPE_MESON6=0x60
-ccflags-y += -DMESON_CPU_TYPE_MESON6TVD=0x75
-ccflags-y += -DMESON_CPU_TYPE_MESON8=0x80
-ccflags-y += -DMESON_CPU_TYPE_MESON8B=0x8B
-#endif
-
-##################### end Kasin Added. ###################
-
+include $(src)/Kbuild.amlogic
# set up defaults if not defined by the user
TIMESTAMP ?= default
ifeq ($(CONFIG_UMP), m)
endif
# Use our defines when compiling
-ccflags-y += -I$(src) -I$(src)/include -I$(src)/common -I$(src)/linux -I$(src)/platform -Wno-data-time
+ccflags-y += -I$(src) -I$(src)/include -I$(src)/common -I$(src)/linux -I$(src)/platform
# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
MALI_RELEASE_NAME=$(shell cat $(TOP_KBUILD_SRC)$(DRIVER_DIR)/.version 2> /dev/null)
--- /dev/null
+############## Kasin Added, for platform. ################
+
+ifndef CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH
+ ccflags-y += -DCONFIG_MALI_DMA_BUF_MAP_ON_ATTACH=y
+endif
+
+ccflags-y += -DCONFIG_MALI_DT=y
+ccflags-y += -DMESON_CPU_TYPE=0x80
+ccflags-y += -DMESON_CPU_TYPE_MESON6=0x60
+ccflags-y += -DMESON_CPU_TYPE_MESON6TVD=0x75
+ccflags-y += -DMESON_CPU_TYPE_MESON8=0x80
+ccflags-y += -DMESON_CPU_TYPE_MESON8B=0x8B
+
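+# AMLOGIC_GPU_USE_GPPLL gates the gp_pll code paths in the driver; it is
+# enabled when the Amlogic video/media stack is built in.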
+USE_GPPLL?=0
+ifdef CONFIG_AM_VIDEO
+ USE_GPPLL:=1
+endif
+ifdef CONFIG_AMLOGIC_MEDIA_COMMON
+ USE_GPPLL:=1
+endif
+
+ccflags-y += -DAMLOGIC_GPU_USE_GPPLL=$(USE_GPPLL)
#include <linux/file.h>
#include <linux/seq_file.h>
#include <linux/module.h>
+#include <asm-generic/fcntl.h>
struct mali_sync_pt {
struct sync_pt sync_pt;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
fd = get_unused_fd();
#else
- fd = get_unused_fd_flags(0);
+ fd = get_unused_fd_flags(O_CLOEXEC);
#endif
if (fd < 0) {
#include <linux/types.h>
#include <linux/workqueue.h>
-#include <linux/clk-private.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
enum mali_scale_mode_t {
MALI_PP_SCALING = 0,
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/clk-private.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/mali/mali_utgard.h>
#define AML_CLK_LOCK_ERROR 1
#endif
#define GXBBM_MAX_GPU_FREQ 700000000UL
-
+struct clk;
static unsigned gpu_dbg_level = 0;
module_param(gpu_dbg_level, uint, 0644);
MODULE_PARM_DESC(gpu_dbg_level, "gpu debug level");
printk("gpu_debug"fmt , ## arg); \
} while (0)
-#define GPU_CLK_DBG(fmt, arg...) \
- do { \
- gpu_dbg(1, "line(%d), clk_cntl=0x%08x\n" fmt, __LINE__, mplt_read(HHI_MALI_CLK_CNTL), ## arg);\
- } while (0)
+#define GPU_CLK_DBG(fmt, arg...)
//disable print
#define _dev_info(...)
struct timeval start;
struct timeval end;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 16))
int mali_clock_init_clk_tree(struct platform_device* pdev)
{
mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[pmali_plat->def_clock];
mpdata->pdev = pdev;
return 0;
}
+#else
+int mali_clock_init_clk_tree(struct platform_device* pdev)
+{
+ //mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[pmali_plat->def_clock];
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ clk_prepare_enable(clk_mali);
+
+ return 0;
+}
+
+int mali_clock_init(mali_plat_info_t *pdev)
+{
+ *pdev = *pdev;
+ return 0;
+}
+
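+/* Run the supplied clock-change callback and return its result. */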
+int mali_clock_critical(critical_t critical, size_t param)
+{
+ int ret = 0;
+
+ ret = critical(param);
+
+ return ret;
+}
+
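+/* Switch the Mali clock to the DVFS table entry given by 'param' and time the clk_set_rate() call. */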
+static int critical_clock_set(size_t param)
+{
+ int ret = 0;
+ unsigned int idx = param;
+ mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[idx];
+
+ struct clk *clk_mali = pmali_plat->clk_mali;
+	unsigned long time_use = 0;
+
+ GPU_CLK_DBG();
+ do_gettimeofday(&start);
+ ret = clk_set_rate(clk_mali, dvfs_tbl->clk_freq);
+ do_gettimeofday(&end);
+ GPU_CLK_DBG();
+
+#ifndef AML_CLK_LOCK_ERROR
+ clk_disable_unprepare(clk_mali_x_old);
+#endif
+ time_use = (end.tv_sec - start.tv_sec)*1000000 + end.tv_usec - start.tv_usec;
+ GPU_CLK_DBG("step 1, mali_mux use: %ld us\n", time_use);
+
+ return 0;
+}
+
+int mali_clock_set(unsigned int clock)
+{
+ return mali_clock_critical(critical_clock_set, (size_t)clock);
+}
+
+void disable_clock(void)
+{
+#ifndef AML_CLK_LOCK_ERROR
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ GPU_CLK_DBG();
+ clk_disable_unprepare(clk_mali);
+#endif
+ GPU_CLK_DBG();
+}
+
+void enable_clock(void)
+{
+#ifndef AML_CLK_LOCK_ERROR
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ clk_prepare_enable(clk_mali);
+#endif
+ GPU_CLK_DBG();
+}
+
+u32 get_mali_freq(u32 idx)
+{
+ if (!mali_pm_statue) {
+ return pmali_plat->clk_sample[idx];
+ } else {
+ return 0;
+ }
+}
+
+void set_str_src(u32 data)
+{
+ printk("gpu: %s, %s, %d\n", __FILE__, __func__, __LINE__);
+}
+
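+/* Parse the GPU device-tree node: PP core limits, register ranges and the
+ * DVFS table referenced by the "tbl" property, then grab the "gpu_mux" clock. */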
+int mali_dt_info(struct platform_device *pdev, struct mali_plat_info_t *mpdata)
+{
+ struct device_node *gpu_dn = pdev->dev.of_node;
+ struct device_node *gpu_clk_dn;
+ struct mali_gpu_clk_item *clk_item;
+ phandle dvfs_clk_hdl;
+ mali_dvfs_threshold_table *dvfs_tbl = NULL;
+ uint32_t *clk_sample = NULL;
+
+ struct property *prop;
+ const __be32 *p;
+ int length = 0, i = 0;
+ u32 u;
+
+ int ret = 0;
+ if (!gpu_dn) {
+		dev_notice(&pdev->dev, "gpu device node not found\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(gpu_dn,"num_of_pp",
+ &mpdata->cfg_pp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set max pp to default 6\n");
+ mpdata->cfg_pp = 6;
+ }
+ mpdata->scale_info.maxpp = mpdata->cfg_pp;
+ mpdata->maxpp_sysfs = mpdata->cfg_pp;
+ _dev_info(&pdev->dev, "max pp is %d\n", mpdata->scale_info.maxpp);
+
+ ret = of_property_read_u32(gpu_dn,"min_pp",
+ &mpdata->cfg_min_pp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set min pp to default 1\n");
+ mpdata->cfg_min_pp = 1;
+ }
+ mpdata->scale_info.minpp = mpdata->cfg_min_pp;
+ _dev_info(&pdev->dev, "min pp is %d\n", mpdata->scale_info.minpp);
+
+ ret = of_property_read_u32(gpu_dn,"min_clk",
+ &mpdata->cfg_min_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "set min clk default to 0\n");
+ mpdata->cfg_min_clock = 0;
+ }
+ mpdata->scale_info.minclk = mpdata->cfg_min_clock;
+ _dev_info(&pdev->dev, "min clk is %d\n", mpdata->scale_info.minclk);
+
+ mpdata->reg_base_hiubus = of_iomap(gpu_dn, 1);
+ _dev_info(&pdev->dev, "hiu io source 0x%p\n", mpdata->reg_base_hiubus);
+
+ mpdata->reg_base_aobus = of_iomap(gpu_dn, 2);
+	_dev_info(&pdev->dev, "aobus io source 0x%p\n", mpdata->reg_base_aobus);
+
+ ret = of_property_read_u32(gpu_dn,"sc_mpp",
+ &mpdata->sc_mpp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set pp used most of time default to %d\n", mpdata->cfg_pp);
+ mpdata->sc_mpp = mpdata->cfg_pp;
+ }
+ _dev_info(&pdev->dev, "num of pp used most of time %d\n", mpdata->sc_mpp);
+
+ of_get_property(gpu_dn, "tbl", &length);
+
+ length = length /sizeof(u32);
+ _dev_info(&pdev->dev, "clock dvfs cfg table size is %d\n", length);
+
+ mpdata->dvfs_table = devm_kzalloc(&pdev->dev,
+ sizeof(struct mali_dvfs_threshold_table)*length,
+ GFP_KERNEL);
+ dvfs_tbl = mpdata->dvfs_table;
+ if (mpdata->dvfs_table == NULL) {
+ dev_err(&pdev->dev, "failed to alloc dvfs table\n");
+ return -ENOMEM;
+ }
+ mpdata->clk_sample = devm_kzalloc(&pdev->dev, sizeof(u32)*length, GFP_KERNEL);
+ if (mpdata->clk_sample == NULL) {
+ dev_err(&pdev->dev, "failed to alloc clk_sample table\n");
+ return -ENOMEM;
+ }
+ clk_sample = mpdata->clk_sample;
+///////////
+ mpdata->clk_items = devm_kzalloc(&pdev->dev, sizeof(struct mali_gpu_clk_item) * length, GFP_KERNEL);
+ if (mpdata->clk_items == NULL) {
+ dev_err(&pdev->dev, "failed to alloc clk_item table\n");
+ return -ENOMEM;
+ }
+ clk_item = mpdata->clk_items;
+//
+ of_property_for_each_u32(gpu_dn, "tbl", prop, p, u) {
+ dvfs_clk_hdl = (phandle) u;
+ gpu_clk_dn = of_find_node_by_phandle(dvfs_clk_hdl);
+ ret = of_property_read_u32(gpu_clk_dn,"clk_freq", &dvfs_tbl->clk_freq);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_freq failed\n");
+ }
+#if 0
+#ifdef MESON_CPU_VERSION_OPS
+ if (is_meson_gxbbm_cpu()) {
+ if (dvfs_tbl->clk_freq >= GXBBM_MAX_GPU_FREQ)
+ continue;
+ }
+#endif
+#endif
+#if 0
+ ret = of_property_read_string(gpu_clk_dn,"clk_parent",
+ &dvfs_tbl->clk_parent);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_parent failed\n");
+ }
+ dvfs_tbl->clkp_handle = devm_clk_get(&pdev->dev, dvfs_tbl->clk_parent);
+ if (IS_ERR(dvfs_tbl->clkp_handle)) {
+ dev_notice(&pdev->dev, "failed to get %s's clock pointer\n", dvfs_tbl->clk_parent);
+ }
+ ret = of_property_read_u32(gpu_clk_dn,"clkp_freq", &dvfs_tbl->clkp_freq);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_parent freq failed\n");
+ }
+#endif
+ ret = of_property_read_u32(gpu_clk_dn,"voltage", &dvfs_tbl->voltage);
+ if (ret) {
+ dev_notice(&pdev->dev, "read voltage failed\n");
+ }
+ ret = of_property_read_u32(gpu_clk_dn,"keep_count", &dvfs_tbl->keep_count);
+ if (ret) {
+ dev_notice(&pdev->dev, "read keep_count failed\n");
+ }
+ //downthreshold and upthreshold shall be u32
+ ret = of_property_read_u32_array(gpu_clk_dn,"threshold",
+ &dvfs_tbl->downthreshold, 2);
+ if (ret) {
+ dev_notice(&pdev->dev, "read threshold failed\n");
+ }
+ dvfs_tbl->freq_index = i;
+ clk_item->clock = dvfs_tbl->clk_freq / 1000000;
+ clk_item->vol = dvfs_tbl->voltage;
+
+ *clk_sample = dvfs_tbl->clk_freq / 1000000;
+
+ dvfs_tbl ++;
+ clk_item ++;
+ clk_sample ++;
+ i++;
+ mpdata->dvfs_table_size ++;
+ }
+
+ ret = of_property_read_u32(gpu_dn,"max_clk",
+ &mpdata->cfg_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "max clk set %d\n", mpdata->dvfs_table_size-2);
+ mpdata->cfg_clock = mpdata->dvfs_table_size-2;
+ }
+
+ mpdata->cfg_clock_bkup = mpdata->cfg_clock;
+ mpdata->maxclk_sysfs = mpdata->cfg_clock;
+ mpdata->scale_info.maxclk = mpdata->cfg_clock;
+ _dev_info(&pdev->dev, "max clk is %d\n", mpdata->scale_info.maxclk);
+
+ ret = of_property_read_u32(gpu_dn,"turbo_clk",
+ &mpdata->turbo_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "turbo clk set to %d\n", mpdata->dvfs_table_size-1);
+ mpdata->turbo_clock = mpdata->dvfs_table_size-1;
+ }
+ _dev_info(&pdev->dev, "turbo clk is %d\n", mpdata->turbo_clock);
+
+ ret = of_property_read_u32(gpu_dn,"def_clk",
+ &mpdata->def_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "default clk set to %d\n", mpdata->dvfs_table_size/2-1);
+ mpdata->def_clock = mpdata->dvfs_table_size/2 - 1;
+ }
+ _dev_info(&pdev->dev, "default clk is %d\n", mpdata->def_clock);
+
+ dvfs_tbl = mpdata->dvfs_table;
+ clk_sample = mpdata->clk_sample;
+ for (i = 0; i< mpdata->dvfs_table_size; i++) {
+ _dev_info(&pdev->dev, "====================%d====================\n"
+			"clk_freq=%10d, clk_parent=%9s, voltage=%d, keep_count=%d, threshold=<%d %d>, clk_sample=%d\n",
+ i,
+ dvfs_tbl->clk_freq, dvfs_tbl->clk_parent,
+ dvfs_tbl->voltage, dvfs_tbl->keep_count,
+ dvfs_tbl->downthreshold, dvfs_tbl->upthreshold, *clk_sample);
+ dvfs_tbl ++;
+ clk_sample ++;
+ }
+ _dev_info(&pdev->dev, "clock dvfs table size is %d\n", mpdata->dvfs_table_size);
+
+ mpdata->clk_mali = devm_clk_get(&pdev->dev, "gpu_mux");
+#if 0
+ mpdata->clk_mali_0 = devm_clk_get(&pdev->dev, "clk_mali_0");
+ mpdata->clk_mali_1 = devm_clk_get(&pdev->dev, "clk_mali_1");
+#endif
+ if (IS_ERR(mpdata->clk_mali)) {
+ dev_err(&pdev->dev, "failed to get clock pointer\n");
+ return -EFAULT;
+ }
+
+ pmali_plat = mpdata;
+ mpdata->pdev = pdev;
+ return 0;
+}
+
+#endif
#include <mali_osk_profiling.h>
#include <linux/time.h>
-#include <linux/amlogic/amports/gp_pll.h>
+//#include <linux/amlogic/amports/gp_pll.h>
#include "meson_main2.h"
#include <mali_kernel_common.h>
#include <mali_osk_profiling.h>
+#if AMLOGIC_GPU_USE_GPPLL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 16)
#include <linux/amlogic/amports/gp_pll.h>
+#else
+#include <linux/amlogic/media/clk/gp_pll.h>
+#endif
+#endif
#define LOG_MALI_SCALING 1
#include "meson_main2.h"
#include "mali_clock.h"
//static int scaling_mode = MALI_SCALING_DISABLE;
//static int scaling_mode = MALI_PP_SCALING;
+#if AMLOGIC_GPU_USE_GPPLL
static struct gp_pll_user_handle_s *gp_pll_user_gpu;
static int is_gp_pll_get;
static int is_gp_pll_put;
+#endif
static unsigned scaling_dbg_level = 0;
module_param(scaling_dbg_level, uint, 0644);
{
mali_dvfs_threshold_table * pdvfs = pmali_plat->dvfs_table;
uint32_t execStep = currentStep;
+#if AMLOGIC_GPU_USE_GPPLL
mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[currentStep];
+#endif
//if (pdvfs[currentStep].freq_index == pdvfs[lastStep].freq_index) return;
if ((pdvfs[execStep].freq_index == pdvfs[lastStep].freq_index) ||
return;
}
+#if AMLOGIC_GPU_USE_GPPLL
if (0 == strcmp(dvfs_tbl->clk_parent, "gp0_pll")) {
gp_pll_request(gp_pll_user_gpu);
if (!is_gp_pll_get) {
is_gp_pll_put = 0;
gp_pll_release(gp_pll_user_gpu);
}
+#endif
//mali_dev_pause();
mali_clock_set(pdvfs[execStep].freq_index);
//mali_dev_resume();
lastStep = execStep;
+#if AMLOGIC_GPU_USE_GPPLL
if (is_gp_pll_put) {
//printk("release gp0 pll\n");
gp_pll_release(gp_pll_user_gpu);
is_gp_pll_get = 0;
is_gp_pll_put = 0;
}
+#endif
}
+#if AMLOGIC_GPU_USE_GPPLL
static int gp_pll_user_cb_gpu(struct gp_pll_user_handle_s *user,
int event)
{
return 0;
}
+#endif
static void do_scaling(struct work_struct *work)
{
pmali_plat = mali_plat;
num_cores_enabled = pmali_plat->sc_mpp;
+#if AMLOGIC_GPU_USE_GPPLL
gp_pll_user_gpu = gp_pll_user_register("gpu", 1,
gp_pll_user_cb_gpu);
//not get the gp pll, do need put
is_gp_pll_get = 0;
is_gp_pll_put = 0;
if (gp_pll_user_gpu == NULL) printk("register gp pll user for gpu failed\n");
+#endif
currentStep = pmali_plat->def_clock;
lastStep = currentStep;
{
#ifndef CONFIG_MALI_DVFS
flush_scheduled_work();
+#if AMLOGIC_GPU_USE_GPPLL
gp_pll_user_unregister(gp_pll_user_gpu);
#endif
+#endif
}
#ifndef CONFIG_MALI_DVFS
ifeq ($(CONFIG_MALI_PLATFORM_DEVICETREE),y)
ccflags-y += -I$(src)/platform/devicetree
+ include $(src)/platform/devicetree/Kbuild
endif
# Tell the Linux build system from which .o file to create the kernel module
ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY)$(CONFIG_MALI_PLATFORM_FAKE),)
CONFIG_MALI_PLATFORM_DEVICETREE := y
endif
-
-mali_kbase-$(CONFIG_MALI_PLATFORM_DEVICETREE) += \
- platform/devicetree/mali_clock.o \
- platform/devicetree/mpgpu.o \
- platform/devicetree/meson_main2.o \
- platform/devicetree/platform_gx.o \
- platform/devicetree/scaling.o \
- platform/devicetree/mali_kbase_runtime_pm.o \
- platform/devicetree/mali_kbase_config_devicetree.o
-ccflags-$(CONFIG_MALI_PLATFORM_DEVICETREE) += -I$(src)/platform/devicetree
# Boston, MA 02110-1301, USA.
#
#
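+# AMLOGIC_GPU_USE_GPPLL gates the gp_pll code paths in the driver; it is
+# enabled when the Amlogic video/media stack is built in.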
+USE_GPPLL?=0
+ifdef CONFIG_AM_VIDEO
+ USE_GPPLL:=1
+endif
+ifdef CONFIG_AMLOGIC_MEDIA_COMMON
+ USE_GPPLL:=1
+endif
+
+ccflags-y += -DAMLOGIC_GPU_USE_GPPLL=$(USE_GPPLL)
+
ifeq ($(CONFIG_MALI_MIDGARD),y)
obj-y += platform/devicetree/mali_clock.c
obj-y += platform/devicetree/mpgpu.c
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/clk-private.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include "mali_scaling.h"
printk("gpu_debug"fmt , ## arg); \
} while (0)
-#define GPU_CLK_DBG(fmt, arg...) \
- do { \
- gpu_dbg(1, "line(%d), clk_cntl=0x%08x\n" fmt, __LINE__, mplt_read(HHI_MALI_CLK_CNTL), ## arg);\
- } while (0)
+#define GPU_CLK_DBG(fmt, arg...)
//disable print
#define _dev_info(...)
struct timeval end;
int mali_pm_statue = 0;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 16))
int mali_clock_init_clk_tree(struct platform_device* pdev)
{
mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[pmali_plat->def_clock];
mpdata->pdev = pdev;
return 0;
}
+#else
+int mali_clock_init_clk_tree(struct platform_device* pdev)
+{
+ //mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[pmali_plat->def_clock];
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ clk_prepare_enable(clk_mali);
+
+ return 0;
+}
+
+int mali_clock_init(mali_plat_info_t *pdev)
+{
+ *pdev = *pdev;
+ return 0;
+}
+
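+/* Run the supplied clock-change callback and return its result. */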
+int mali_clock_critical(critical_t critical, size_t param)
+{
+ int ret = 0;
+
+ ret = critical(param);
+
+ return ret;
+}
+
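+/* Switch the Mali clock to the DVFS table entry given by 'param' and time the clk_set_rate() call. */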
+static int critical_clock_set(size_t param)
+{
+ int ret = 0;
+ unsigned int idx = param;
+ mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[idx];
+
+ struct clk *clk_mali = pmali_plat->clk_mali;
+	unsigned long time_use = 0;
+
+ GPU_CLK_DBG();
+ do_gettimeofday(&start);
+ ret = clk_set_rate(clk_mali, dvfs_tbl->clk_freq);
+ do_gettimeofday(&end);
+ GPU_CLK_DBG();
+
+#ifndef AML_CLK_LOCK_ERROR
+ clk_disable_unprepare(clk_mali_x_old);
+#endif
+ time_use = (end.tv_sec - start.tv_sec)*1000000 + end.tv_usec - start.tv_usec;
+ GPU_CLK_DBG("step 1, mali_mux use: %ld us\n", time_use);
+
+ return 0;
+}
+
+int mali_clock_set(unsigned int clock)
+{
+ return mali_clock_critical(critical_clock_set, (size_t)clock);
+}
+
+void disable_clock(void)
+{
+#ifndef AML_CLK_LOCK_ERROR
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ GPU_CLK_DBG();
+ clk_disable_unprepare(clk_mali);
+#endif
+ GPU_CLK_DBG();
+}
+
+void enable_clock(void)
+{
+#ifndef AML_CLK_LOCK_ERROR
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ clk_prepare_enable(clk_mali);
+#endif
+ GPU_CLK_DBG();
+}
+
+u32 get_mali_freq(u32 idx)
+{
+ if (!mali_pm_statue) {
+ return pmali_plat->clk_sample[idx];
+ } else {
+ return 0;
+ }
+}
+
+void set_str_src(u32 data)
+{
+ printk("gpu: %s, %s, %d\n", __FILE__, __func__, __LINE__);
+}
+
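+/* Parse the GPU device-tree node: PP core limits, register ranges and the
+ * DVFS table referenced by the "tbl" property, then grab the "gpu_mux" clock. */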
+int mali_dt_info(struct platform_device *pdev, struct mali_plat_info_t *mpdata)
+{
+ struct device_node *gpu_dn = pdev->dev.of_node;
+ struct device_node *gpu_clk_dn;
+ phandle dvfs_clk_hdl;
+ mali_dvfs_threshold_table *dvfs_tbl = NULL;
+ uint32_t *clk_sample = NULL;
+
+ struct property *prop;
+ const __be32 *p;
+ int length = 0, i = 0;
+ u32 u;
+
+ int ret = 0;
+ if (!gpu_dn) {
+		dev_notice(&pdev->dev, "gpu device node not found\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(gpu_dn,"num_of_pp",
+ &mpdata->cfg_pp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set max pp to default 6\n");
+ mpdata->cfg_pp = 6;
+ }
+ mpdata->scale_info.maxpp = mpdata->cfg_pp;
+ mpdata->maxpp_sysfs = mpdata->cfg_pp;
+ _dev_info(&pdev->dev, "max pp is %d\n", mpdata->scale_info.maxpp);
+
+ ret = of_property_read_u32(gpu_dn,"min_pp",
+ &mpdata->cfg_min_pp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set min pp to default 1\n");
+ mpdata->cfg_min_pp = 1;
+ }
+ mpdata->scale_info.minpp = mpdata->cfg_min_pp;
+ _dev_info(&pdev->dev, "min pp is %d\n", mpdata->scale_info.minpp);
+
+ ret = of_property_read_u32(gpu_dn,"min_clk",
+ &mpdata->cfg_min_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "set min clk default to 0\n");
+ mpdata->cfg_min_clock = 0;
+ }
+ mpdata->scale_info.minclk = mpdata->cfg_min_clock;
+ _dev_info(&pdev->dev, "min clk is %d\n", mpdata->scale_info.minclk);
+
+ mpdata->reg_base_hiubus = of_iomap(gpu_dn, 1);
+ _dev_info(&pdev->dev, "hiu io source 0x%p\n", mpdata->reg_base_hiubus);
+
+ mpdata->reg_base_aobus = of_iomap(gpu_dn, 2);
+	_dev_info(&pdev->dev, "aobus io source 0x%p\n", mpdata->reg_base_aobus);
+
+ ret = of_property_read_u32(gpu_dn,"sc_mpp",
+ &mpdata->sc_mpp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set pp used most of time default to %d\n", mpdata->cfg_pp);
+ mpdata->sc_mpp = mpdata->cfg_pp;
+ }
+ _dev_info(&pdev->dev, "num of pp used most of time %d\n", mpdata->sc_mpp);
+
+ of_get_property(gpu_dn, "tbl", &length);
+
+ length = length /sizeof(u32);
+ _dev_info(&pdev->dev, "clock dvfs cfg table size is %d\n", length);
+
+ mpdata->dvfs_table = devm_kzalloc(&pdev->dev,
+ sizeof(struct mali_dvfs_threshold_table)*length,
+ GFP_KERNEL);
+ dvfs_tbl = mpdata->dvfs_table;
+ if (mpdata->dvfs_table == NULL) {
+ dev_err(&pdev->dev, "failed to alloc dvfs table\n");
+ return -ENOMEM;
+ }
+ mpdata->clk_sample = devm_kzalloc(&pdev->dev, sizeof(u32)*length, GFP_KERNEL);
+ if (mpdata->clk_sample == NULL) {
+ dev_err(&pdev->dev, "failed to alloc clk_sample table\n");
+ return -ENOMEM;
+ }
+ clk_sample = mpdata->clk_sample;
+ of_property_for_each_u32(gpu_dn, "tbl", prop, p, u) {
+ dvfs_clk_hdl = (phandle) u;
+ gpu_clk_dn = of_find_node_by_phandle(dvfs_clk_hdl);
+ ret = of_property_read_u32(gpu_clk_dn,"clk_freq", &dvfs_tbl->clk_freq);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_freq failed\n");
+ }
+#if 0
+#ifdef MESON_CPU_VERSION_OPS
+ if (is_meson_gxbbm_cpu()) {
+ if (dvfs_tbl->clk_freq >= GXBBM_MAX_GPU_FREQ)
+ continue;
+ }
+#endif
+#endif
+#if 0
+ ret = of_property_read_string(gpu_clk_dn,"clk_parent",
+ &dvfs_tbl->clk_parent);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_parent failed\n");
+ }
+ dvfs_tbl->clkp_handle = devm_clk_get(&pdev->dev, dvfs_tbl->clk_parent);
+ if (IS_ERR(dvfs_tbl->clkp_handle)) {
+ dev_notice(&pdev->dev, "failed to get %s's clock pointer\n", dvfs_tbl->clk_parent);
+ }
+ ret = of_property_read_u32(gpu_clk_dn,"clkp_freq", &dvfs_tbl->clkp_freq);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_parent freq failed\n");
+ }
+#endif
+ ret = of_property_read_u32(gpu_clk_dn,"voltage", &dvfs_tbl->voltage);
+ if (ret) {
+ dev_notice(&pdev->dev, "read voltage failed\n");
+ }
+ ret = of_property_read_u32(gpu_clk_dn,"keep_count", &dvfs_tbl->keep_count);
+ if (ret) {
+ dev_notice(&pdev->dev, "read keep_count failed\n");
+ }
+ //downthreshold and upthreshold shall be u32
+ ret = of_property_read_u32_array(gpu_clk_dn,"threshold",
+ &dvfs_tbl->downthreshold, 2);
+ if (ret) {
+ dev_notice(&pdev->dev, "read threshold failed\n");
+ }
+ dvfs_tbl->freq_index = i;
+
+ *clk_sample = dvfs_tbl->clk_freq / 1000000;
+
+ dvfs_tbl ++;
+ clk_sample ++;
+ i++;
+ mpdata->dvfs_table_size ++;
+ }
+
+ ret = of_property_read_u32(gpu_dn,"max_clk",
+ &mpdata->cfg_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "max clk set %d\n", mpdata->dvfs_table_size-2);
+ mpdata->cfg_clock = mpdata->dvfs_table_size-2;
+ }
+
+ mpdata->cfg_clock_bkup = mpdata->cfg_clock;
+ mpdata->maxclk_sysfs = mpdata->cfg_clock;
+ mpdata->scale_info.maxclk = mpdata->cfg_clock;
+ _dev_info(&pdev->dev, "max clk is %d\n", mpdata->scale_info.maxclk);
+
+ ret = of_property_read_u32(gpu_dn,"turbo_clk",
+ &mpdata->turbo_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "turbo clk set to %d\n", mpdata->dvfs_table_size-1);
+ mpdata->turbo_clock = mpdata->dvfs_table_size-1;
+ }
+ _dev_info(&pdev->dev, "turbo clk is %d\n", mpdata->turbo_clock);
+
+ ret = of_property_read_u32(gpu_dn,"def_clk",
+ &mpdata->def_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "default clk set to %d\n", mpdata->dvfs_table_size/2-1);
+ mpdata->def_clock = mpdata->dvfs_table_size/2 - 1;
+ }
+ _dev_info(&pdev->dev, "default clk is %d\n", mpdata->def_clock);
+
+ dvfs_tbl = mpdata->dvfs_table;
+ clk_sample = mpdata->clk_sample;
+ for (i = 0; i< mpdata->dvfs_table_size; i++) {
+ _dev_info(&pdev->dev, "====================%d====================\n"
+			"clk_freq=%10d, clk_parent=%9s, voltage=%d, keep_count=%d, threshold=<%d %d>, clk_sample=%d\n",
+ i,
+ dvfs_tbl->clk_freq, dvfs_tbl->clk_parent,
+ dvfs_tbl->voltage, dvfs_tbl->keep_count,
+ dvfs_tbl->downthreshold, dvfs_tbl->upthreshold, *clk_sample);
+ dvfs_tbl ++;
+ clk_sample ++;
+ }
+ _dev_info(&pdev->dev, "clock dvfs table size is %d\n", mpdata->dvfs_table_size);
+
+ mpdata->clk_mali = devm_clk_get(&pdev->dev, "gpu_mux");
+#if 0
+ mpdata->clk_mali_0 = devm_clk_get(&pdev->dev, "clk_mali_0");
+ mpdata->clk_mali_1 = devm_clk_get(&pdev->dev, "clk_mali_1");
+#endif
+ if (IS_ERR(mpdata->clk_mali)) {
+ dev_err(&pdev->dev, "failed to get clock pointer\n");
+ return -EFAULT;
+ }
+
+ pmali_plat = mpdata;
+ mpdata->pdev = pdev;
+ return 0;
+}
+
+#endif
#include <linux/types.h>
#include <linux/workqueue.h>
-#include <linux/clk-private.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
enum mali_scale_mode_t {
MALI_PP_SCALING = 0,
#include <linux/module.h>
#include <linux/workqueue.h>
+#if AMLOGIC_GPU_USE_GPPLL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 16)
#include <linux/amlogic/amports/gp_pll.h>
+#else
+#include <linux/amlogic/media/clk/gp_pll.h>
+#endif
+#endif
+
#define LOG_MALI_SCALING 1
#include "meson_main2.h"
#include "mali_clock.h"
//static int scaling_mode = MALI_SCALING_DISABLE;
//static int scaling_mode = MALI_PP_SCALING;
+#if AMLOGIC_GPU_USE_GPPLL
static struct gp_pll_user_handle_s *gp_pll_user_gpu;
static int is_gp_pll_get;
static int is_gp_pll_put;
-
+#endif
static unsigned scaling_dbg_level = 0;
module_param(scaling_dbg_level, uint, 0644);
MODULE_PARM_DESC(scaling_dbg_level , "scaling debug level");
{
mali_dvfs_threshold_table * pdvfs = pmali_plat->dvfs_table;
uint32_t execStep = currentStep;
+#if AMLOGIC_GPU_USE_GPPLL
mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[currentStep];
+#endif
//if (pdvfs[currentStep].freq_index == pdvfs[lastStep].freq_index) return;
if ((pdvfs[execStep].freq_index == pdvfs[lastStep].freq_index) ||
return;
}
+#if AMLOGIC_GPU_USE_GPPLL
if (0 == strcmp(dvfs_tbl->clk_parent, "gp0_pll")) {
gp_pll_request(gp_pll_user_gpu);
if (!is_gp_pll_get) {
is_gp_pll_put = 0;
gp_pll_release(gp_pll_user_gpu);
}
-
+#endif
//mali_dev_pause();
mali_clock_set(pdvfs[execStep].freq_index);
//mali_dev_resume();
lastStep = execStep;
+#if AMLOGIC_GPU_USE_GPPLL
if (is_gp_pll_put) {
//printk("release gp0 pll\n");
gp_pll_release(gp_pll_user_gpu);
is_gp_pll_get = 0;
is_gp_pll_put = 0;
}
+#endif
}
+#if AMLOGIC_GPU_USE_GPPLL
static int gp_pll_user_cb_gpu(struct gp_pll_user_handle_s *user,
int event)
{
return 0;
}
+#endif
int mali_perf_set_num_pp_cores(int cores)
{
pmali_plat = mali_plat;
printk("mali_plat=%p\n", mali_plat);
num_cores_enabled = pmali_plat->sc_mpp;
+#if AMLOGIC_GPU_USE_GPPLL
gp_pll_user_gpu = gp_pll_user_register("gpu", 1,
gp_pll_user_cb_gpu);
//not get the gp pll, do need put
is_gp_pll_get = 0;
is_gp_pll_put = 0;
if (gp_pll_user_gpu == NULL) printk("register gp pll user for gpu failed\n");
+#endif
currentStep = pmali_plat->def_clock;
lastStep = currentStep;
{
#ifndef CONFIG_MALI_DVFS
flush_scheduled_work();
+#if AMLOGIC_GPU_USE_GPPLL
gp_pll_user_unregister(gp_pll_user_gpu);
#endif
+#endif
}
#ifndef CONFIG_MALI_DVFS
+++ /dev/null
-t83x
\ No newline at end of file
ifeq ($(CONFIG_MALI_PLATFORM_DEVICETREE),y)
ccflags-y += -I$(src)/platform/devicetree
+ include $(src)/platform/devicetree/Kbuild
endif
# Tell the Linux build system from which .o file to create the kernel module
ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY)$(CONFIG_MALI_PLATFORM_FAKE),)
CONFIG_MALI_PLATFORM_DEVICETREE := y
endif
-
-mali_kbase-$(CONFIG_MALI_PLATFORM_DEVICETREE) += \
- platform/devicetree/mali_clock.o \
- platform/devicetree/mpgpu.o \
- platform/devicetree/meson_main2.o \
- platform/devicetree/platform_gx.o \
- platform/devicetree/scaling.o \
- platform/devicetree/mali_kbase_runtime_pm.o \
- platform/devicetree/mali_kbase_config_devicetree.o
-ccflags-$(CONFIG_MALI_PLATFORM_DEVICETREE) += -I$(src)/platform/devicetree
# Boston, MA 02110-1301, USA.
#
#
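+# AMLOGIC_GPU_USE_GPPLL gates the gp_pll code paths in the driver; it is
+# enabled when the Amlogic video/media stack is built in.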
+USE_GPPLL?=0
+ifdef CONFIG_AM_VIDEO
+ USE_GPPLL:=1
+endif
+ifdef CONFIG_AMLOGIC_MEDIA_COMMON
+ USE_GPPLL:=1
+endif
+
+ccflags-y += -DAMLOGIC_GPU_USE_GPPLL=$(USE_GPPLL)
+
ifeq ($(CONFIG_MALI_MIDGARD),y)
obj-y += platform/devicetree/mali_clock.c
obj-y += platform/devicetree/mpgpu.c
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/clk-private.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include "mali_scaling.h"
printk("gpu_debug"fmt , ## arg); \
} while (0)
-#define GPU_CLK_DBG(fmt, arg...) \
- do { \
- gpu_dbg(1, "line(%d), clk_cntl=0x%08x\n" fmt, __LINE__, mplt_read(HHI_MALI_CLK_CNTL), ## arg);\
- } while (0)
+#define GPU_CLK_DBG(fmt, arg...)
//disable print
#define _dev_info(...)
struct timeval end;
int mali_pm_statue = 0;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 16))
int mali_clock_init_clk_tree(struct platform_device* pdev)
{
mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[pmali_plat->def_clock];
mpdata->pdev = pdev;
return 0;
}
+#else
+int mali_clock_init_clk_tree(struct platform_device* pdev)
+{
+ //mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[pmali_plat->def_clock];
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ clk_prepare_enable(clk_mali);
+
+ return 0;
+}
+
+int mali_clock_init(mali_plat_info_t *pdev)
+{
+ *pdev = *pdev;
+ return 0;
+}
+
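+/* Run the supplied clock-change callback and return its result. */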
+int mali_clock_critical(critical_t critical, size_t param)
+{
+ int ret = 0;
+
+ ret = critical(param);
+
+ return ret;
+}
+
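+/* Switch the Mali clock to the DVFS table entry given by 'param' and time the clk_set_rate() call. */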
+static int critical_clock_set(size_t param)
+{
+ int ret = 0;
+ unsigned int idx = param;
+ mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[idx];
+
+ struct clk *clk_mali = pmali_plat->clk_mali;
+	unsigned long time_use = 0;
+
+ GPU_CLK_DBG();
+ do_gettimeofday(&start);
+ ret = clk_set_rate(clk_mali, dvfs_tbl->clk_freq);
+ do_gettimeofday(&end);
+ GPU_CLK_DBG();
+
+#ifndef AML_CLK_LOCK_ERROR
+ clk_disable_unprepare(clk_mali_x_old);
+#endif
+ time_use = (end.tv_sec - start.tv_sec)*1000000 + end.tv_usec - start.tv_usec;
+ GPU_CLK_DBG("step 1, mali_mux use: %ld us\n", time_use);
+
+ return 0;
+}
+
+int mali_clock_set(unsigned int clock)
+{
+ return mali_clock_critical(critical_clock_set, (size_t)clock);
+}
+
+void disable_clock(void)
+{
+#ifndef AML_CLK_LOCK_ERROR
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ GPU_CLK_DBG();
+ clk_disable_unprepare(clk_mali);
+#endif
+ GPU_CLK_DBG();
+}
+
+void enable_clock(void)
+{
+#ifndef AML_CLK_LOCK_ERROR
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ clk_prepare_enable(clk_mali);
+#endif
+ GPU_CLK_DBG();
+}
+
+u32 get_mali_freq(u32 idx)
+{
+ if (!mali_pm_statue) {
+ return pmali_plat->clk_sample[idx];
+ } else {
+ return 0;
+ }
+}
+
+void set_str_src(u32 data)
+{
+ printk("gpu: %s, %s, %d\n", __FILE__, __func__, __LINE__);
+}
+
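+/* Parse the GPU device-tree node: PP core limits, register ranges and the
+ * DVFS table referenced by the "tbl" property, then grab the "gpu_mux" clock. */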
+int mali_dt_info(struct platform_device *pdev, struct mali_plat_info_t *mpdata)
+{
+ struct device_node *gpu_dn = pdev->dev.of_node;
+ struct device_node *gpu_clk_dn;
+ phandle dvfs_clk_hdl;
+ mali_dvfs_threshold_table *dvfs_tbl = NULL;
+ uint32_t *clk_sample = NULL;
+
+ struct property *prop;
+ const __be32 *p;
+ int length = 0, i = 0;
+ u32 u;
+
+ int ret = 0;
+ if (!gpu_dn) {
+		dev_notice(&pdev->dev, "gpu device node not found\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(gpu_dn,"num_of_pp",
+ &mpdata->cfg_pp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set max pp to default 6\n");
+ mpdata->cfg_pp = 6;
+ }
+ mpdata->scale_info.maxpp = mpdata->cfg_pp;
+ mpdata->maxpp_sysfs = mpdata->cfg_pp;
+ _dev_info(&pdev->dev, "max pp is %d\n", mpdata->scale_info.maxpp);
+
+ ret = of_property_read_u32(gpu_dn,"min_pp",
+ &mpdata->cfg_min_pp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set min pp to default 1\n");
+ mpdata->cfg_min_pp = 1;
+ }
+ mpdata->scale_info.minpp = mpdata->cfg_min_pp;
+ _dev_info(&pdev->dev, "min pp is %d\n", mpdata->scale_info.minpp);
+
+ ret = of_property_read_u32(gpu_dn,"min_clk",
+ &mpdata->cfg_min_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "set min clk default to 0\n");
+ mpdata->cfg_min_clock = 0;
+ }
+ mpdata->scale_info.minclk = mpdata->cfg_min_clock;
+ _dev_info(&pdev->dev, "min clk is %d\n", mpdata->scale_info.minclk);
+
+ mpdata->reg_base_hiubus = of_iomap(gpu_dn, 1);
+ _dev_info(&pdev->dev, "hiu io source 0x%p\n", mpdata->reg_base_hiubus);
+
+ mpdata->reg_base_aobus = of_iomap(gpu_dn, 2);
+	_dev_info(&pdev->dev, "aobus io source 0x%p\n", mpdata->reg_base_aobus);
+
+ ret = of_property_read_u32(gpu_dn,"sc_mpp",
+ &mpdata->sc_mpp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set pp used most of time default to %d\n", mpdata->cfg_pp);
+ mpdata->sc_mpp = mpdata->cfg_pp;
+ }
+ _dev_info(&pdev->dev, "num of pp used most of time %d\n", mpdata->sc_mpp);
+
+ of_get_property(gpu_dn, "tbl", &length);
+
+ length = length /sizeof(u32);
+ _dev_info(&pdev->dev, "clock dvfs cfg table size is %d\n", length);
+
+ mpdata->dvfs_table = devm_kzalloc(&pdev->dev,
+ sizeof(struct mali_dvfs_threshold_table)*length,
+ GFP_KERNEL);
+ dvfs_tbl = mpdata->dvfs_table;
+ if (mpdata->dvfs_table == NULL) {
+ dev_err(&pdev->dev, "failed to alloc dvfs table\n");
+ return -ENOMEM;
+ }
+ mpdata->clk_sample = devm_kzalloc(&pdev->dev, sizeof(u32)*length, GFP_KERNEL);
+ if (mpdata->clk_sample == NULL) {
+ dev_err(&pdev->dev, "failed to alloc clk_sample table\n");
+ return -ENOMEM;
+ }
+ clk_sample = mpdata->clk_sample;
+ of_property_for_each_u32(gpu_dn, "tbl", prop, p, u) {
+ dvfs_clk_hdl = (phandle) u;
+ gpu_clk_dn = of_find_node_by_phandle(dvfs_clk_hdl);
+ ret = of_property_read_u32(gpu_clk_dn,"clk_freq", &dvfs_tbl->clk_freq);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_freq failed\n");
+ }
+#if 0
+#ifdef MESON_CPU_VERSION_OPS
+ if (is_meson_gxbbm_cpu()) {
+ if (dvfs_tbl->clk_freq >= GXBBM_MAX_GPU_FREQ)
+ continue;
+ }
+#endif
+#endif
+#if 0
+ ret = of_property_read_string(gpu_clk_dn,"clk_parent",
+ &dvfs_tbl->clk_parent);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_parent failed\n");
+ }
+ dvfs_tbl->clkp_handle = devm_clk_get(&pdev->dev, dvfs_tbl->clk_parent);
+ if (IS_ERR(dvfs_tbl->clkp_handle)) {
+ dev_notice(&pdev->dev, "failed to get %s's clock pointer\n", dvfs_tbl->clk_parent);
+ }
+ ret = of_property_read_u32(gpu_clk_dn,"clkp_freq", &dvfs_tbl->clkp_freq);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_parent freq failed\n");
+ }
+#endif
+ ret = of_property_read_u32(gpu_clk_dn,"voltage", &dvfs_tbl->voltage);
+ if (ret) {
+ dev_notice(&pdev->dev, "read voltage failed\n");
+ }
+ ret = of_property_read_u32(gpu_clk_dn,"keep_count", &dvfs_tbl->keep_count);
+ if (ret) {
+ dev_notice(&pdev->dev, "read keep_count failed\n");
+ }
+ //downthreshold and upthreshold shall be u32
+ ret = of_property_read_u32_array(gpu_clk_dn,"threshold",
+ &dvfs_tbl->downthreshold, 2);
+ if (ret) {
+ dev_notice(&pdev->dev, "read threshold failed\n");
+ }
+ dvfs_tbl->freq_index = i;
+
+ *clk_sample = dvfs_tbl->clk_freq / 1000000;
+
+ dvfs_tbl ++;
+ clk_sample ++;
+ i++;
+ mpdata->dvfs_table_size ++;
+ }
+
+ ret = of_property_read_u32(gpu_dn,"max_clk",
+ &mpdata->cfg_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "max clk set %d\n", mpdata->dvfs_table_size-2);
+ mpdata->cfg_clock = mpdata->dvfs_table_size-2;
+ }
+
+ mpdata->cfg_clock_bkup = mpdata->cfg_clock;
+ mpdata->maxclk_sysfs = mpdata->cfg_clock;
+ mpdata->scale_info.maxclk = mpdata->cfg_clock;
+ _dev_info(&pdev->dev, "max clk is %d\n", mpdata->scale_info.maxclk);
+
+ ret = of_property_read_u32(gpu_dn,"turbo_clk",
+ &mpdata->turbo_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "turbo clk set to %d\n", mpdata->dvfs_table_size-1);
+ mpdata->turbo_clock = mpdata->dvfs_table_size-1;
+ }
+ _dev_info(&pdev->dev, "turbo clk is %d\n", mpdata->turbo_clock);
+
+ ret = of_property_read_u32(gpu_dn,"def_clk",
+ &mpdata->def_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "default clk set to %d\n", mpdata->dvfs_table_size/2-1);
+ mpdata->def_clock = mpdata->dvfs_table_size/2 - 1;
+ }
+ _dev_info(&pdev->dev, "default clk is %d\n", mpdata->def_clock);
+
+ dvfs_tbl = mpdata->dvfs_table;
+ clk_sample = mpdata->clk_sample;
+ for (i = 0; i< mpdata->dvfs_table_size; i++) {
+ _dev_info(&pdev->dev, "====================%d====================\n"
+			"clk_freq=%10d, clk_parent=%9s, voltage=%d, keep_count=%d, threshold=<%d %d>, clk_sample=%d\n",
+ i,
+ dvfs_tbl->clk_freq, dvfs_tbl->clk_parent,
+ dvfs_tbl->voltage, dvfs_tbl->keep_count,
+ dvfs_tbl->downthreshold, dvfs_tbl->upthreshold, *clk_sample);
+ dvfs_tbl ++;
+ clk_sample ++;
+ }
+ _dev_info(&pdev->dev, "clock dvfs table size is %d\n", mpdata->dvfs_table_size);
+
+ mpdata->clk_mali = devm_clk_get(&pdev->dev, "gpu_mux");
+#if 0
+ mpdata->clk_mali_0 = devm_clk_get(&pdev->dev, "clk_mali_0");
+ mpdata->clk_mali_1 = devm_clk_get(&pdev->dev, "clk_mali_1");
+#endif
+ if (IS_ERR(mpdata->clk_mali)) {
+ dev_err(&pdev->dev, "failed to get clock pointer\n");
+ return -EFAULT;
+ }
+
+ pmali_plat = mpdata;
+ mpdata->pdev = pdev;
+ return 0;
+}
+
+#endif
#include <linux/types.h>
#include <linux/workqueue.h>
-#include <linux/clk-private.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
enum mali_scale_mode_t {
MALI_PP_SCALING = 0,
#include <linux/module.h>
#include <linux/workqueue.h>
+#if AMLOGIC_GPU_USE_GPPLL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 16)
#include <linux/amlogic/amports/gp_pll.h>
+#else
+#include <linux/amlogic/media/clk/gp_pll.h>
+#endif
+#endif
+
#define LOG_MALI_SCALING 1
#include "meson_main2.h"
#include "mali_clock.h"
//static int scaling_mode = MALI_SCALING_DISABLE;
//static int scaling_mode = MALI_PP_SCALING;
+#if AMLOGIC_GPU_USE_GPPLL
static struct gp_pll_user_handle_s *gp_pll_user_gpu;
static int is_gp_pll_get;
static int is_gp_pll_put;
-
+#endif
static unsigned scaling_dbg_level = 0;
module_param(scaling_dbg_level, uint, 0644);
MODULE_PARM_DESC(scaling_dbg_level , "scaling debug level");
{
mali_dvfs_threshold_table * pdvfs = pmali_plat->dvfs_table;
uint32_t execStep = currentStep;
+#if AMLOGIC_GPU_USE_GPPLL
mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[currentStep];
+#endif
//if (pdvfs[currentStep].freq_index == pdvfs[lastStep].freq_index) return;
if ((pdvfs[execStep].freq_index == pdvfs[lastStep].freq_index) ||
return;
}
+#if AMLOGIC_GPU_USE_GPPLL
if (0 == strcmp(dvfs_tbl->clk_parent, "gp0_pll")) {
gp_pll_request(gp_pll_user_gpu);
if (!is_gp_pll_get) {
is_gp_pll_put = 0;
gp_pll_release(gp_pll_user_gpu);
}
-
+#endif
//mali_dev_pause();
mali_clock_set(pdvfs[execStep].freq_index);
//mali_dev_resume();
lastStep = execStep;
+#if AMLOGIC_GPU_USE_GPPLL
if (is_gp_pll_put) {
//printk("release gp0 pll\n");
gp_pll_release(gp_pll_user_gpu);
is_gp_pll_get = 0;
is_gp_pll_put = 0;
}
+#endif
}
+#if AMLOGIC_GPU_USE_GPPLL
static int gp_pll_user_cb_gpu(struct gp_pll_user_handle_s *user,
int event)
{
return 0;
}
+#endif
int mali_perf_set_num_pp_cores(int cores)
{
pmali_plat = mali_plat;
printk("mali_plat=%p\n", mali_plat);
num_cores_enabled = pmali_plat->sc_mpp;
+#if AMLOGIC_GPU_USE_GPPLL
gp_pll_user_gpu = gp_pll_user_register("gpu", 1,
gp_pll_user_cb_gpu);
//not get the gp pll, do need put
is_gp_pll_get = 0;
is_gp_pll_put = 0;
if (gp_pll_user_gpu == NULL) printk("register gp pll user for gpu failed\n");
+#endif
currentStep = pmali_plat->def_clock;
lastStep = currentStep;
{
#ifndef CONFIG_MALI_DVFS
flush_scheduled_work();
+#if AMLOGIC_GPU_USE_GPPLL
gp_pll_user_unregister(gp_pll_user_gpu);
#endif
+#endif
}
#ifndef CONFIG_MALI_DVFS
ifeq ($(CONFIG_MALI_PLATFORM_DEVICETREE),y)
ccflags-y += -I$(src)/platform/devicetree
+ include $(src)/platform/devicetree/Kbuild
endif
# Tell the Linux build system from which .o file to create the kernel module
ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY)$(CONFIG_MALI_PLATFORM_FAKE),)
CONFIG_MALI_PLATFORM_DEVICETREE := y
endif
-
-mali_kbase-$(CONFIG_MALI_PLATFORM_DEVICETREE) += \
- platform/devicetree/mali_clock.o \
- platform/devicetree/mpgpu.o \
- platform/devicetree/meson_main2.o \
- platform/devicetree/platform_gx.o \
- platform/devicetree/scaling.o \
- platform/devicetree/mali_kbase_runtime_pm.o \
- platform/devicetree/mali_kbase_config_devicetree.o
-ccflags-$(CONFIG_MALI_PLATFORM_DEVICETREE) += -I$(src)/platform/devicetree
# Boston, MA 02110-1301, USA.
#
#
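+# AMLOGIC_GPU_USE_GPPLL gates the gp_pll code paths in the driver; it is
+# enabled when the Amlogic video/media stack is built in.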
+USE_GPPLL?=0
+ifdef CONFIG_AM_VIDEO
+ USE_GPPLL:=1
+endif
+ifdef CONFIG_AMLOGIC_MEDIA_COMMON
+ USE_GPPLL:=1
+endif
+
+ccflags-y += -DAMLOGIC_GPU_USE_GPPLL=$(USE_GPPLL)
+
ifeq ($(CONFIG_MALI_MIDGARD),y)
obj-y += platform/devicetree/mali_clock.c
obj-y += platform/devicetree/mpgpu.c
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/clk-private.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include "mali_scaling.h"
printk("gpu_debug"fmt , ## arg); \
} while (0)
-#define GPU_CLK_DBG(fmt, arg...) \
- do { \
- gpu_dbg(1, "line(%d), clk_cntl=0x%08x\n" fmt, __LINE__, mplt_read(HHI_MALI_CLK_CNTL), ## arg);\
- } while (0)
+#define GPU_CLK_DBG(fmt, arg...)
//disable print
#define _dev_info(...)
struct timeval end;
int mali_pm_statue = 0;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 16))
int mali_clock_init_clk_tree(struct platform_device* pdev)
{
mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[pmali_plat->def_clock];
mpdata->pdev = pdev;
return 0;
}
+#else
+int mali_clock_init_clk_tree(struct platform_device* pdev)
+{
+ //mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[pmali_plat->def_clock];
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ clk_prepare_enable(clk_mali);
+
+ return 0;
+}
+
+int mali_clock_init(mali_plat_info_t *pdev)
+{
+ *pdev = *pdev;
+ return 0;
+}
+
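+/* Run the supplied clock-change callback and return its result. */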
+int mali_clock_critical(critical_t critical, size_t param)
+{
+ int ret = 0;
+
+ ret = critical(param);
+
+ return ret;
+}
+
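+/* Switch the Mali clock to the DVFS table entry given by 'param' and time the clk_set_rate() call. */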
+static int critical_clock_set(size_t param)
+{
+ int ret = 0;
+ unsigned int idx = param;
+ mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[idx];
+
+ struct clk *clk_mali = pmali_plat->clk_mali;
+	unsigned long time_use = 0;
+
+ GPU_CLK_DBG();
+ do_gettimeofday(&start);
+ ret = clk_set_rate(clk_mali, dvfs_tbl->clk_freq);
+ do_gettimeofday(&end);
+ GPU_CLK_DBG();
+
+#ifndef AML_CLK_LOCK_ERROR
+ clk_disable_unprepare(clk_mali_x_old);
+#endif
+ time_use = (end.tv_sec - start.tv_sec)*1000000 + end.tv_usec - start.tv_usec;
+ GPU_CLK_DBG("step 1, mali_mux use: %ld us\n", time_use);
+
+ return 0;
+}
+
+int mali_clock_set(unsigned int clock)
+{
+ return mali_clock_critical(critical_clock_set, (size_t)clock);
+}
+
+void disable_clock(void)
+{
+#ifndef AML_CLK_LOCK_ERROR
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ GPU_CLK_DBG();
+ clk_disable_unprepare(clk_mali);
+#endif
+ GPU_CLK_DBG();
+}
+
+void enable_clock(void)
+{
+#ifndef AML_CLK_LOCK_ERROR
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ clk_prepare_enable(clk_mali);
+#endif
+ GPU_CLK_DBG();
+}
+
+u32 get_mali_freq(u32 idx)
+{
+ if (!mali_pm_statue) {
+ return pmali_plat->clk_sample[idx];
+ } else {
+ return 0;
+ }
+}
+
+void set_str_src(u32 data)
+{
+ printk("gpu: %s, %s, %d\n", __FILE__, __func__, __LINE__);
+}
+
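+/* Parse the GPU device-tree node: PP core limits, register ranges and the
+ * DVFS table referenced by the "tbl" property, then grab the "gpu_mux" clock. */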
+int mali_dt_info(struct platform_device *pdev, struct mali_plat_info_t *mpdata)
+{
+ struct device_node *gpu_dn = pdev->dev.of_node;
+ struct device_node *gpu_clk_dn;
+ phandle dvfs_clk_hdl;
+ mali_dvfs_threshold_table *dvfs_tbl = NULL;
+ uint32_t *clk_sample = NULL;
+
+ struct property *prop;
+ const __be32 *p;
+ int length = 0, i = 0;
+ u32 u;
+
+ int ret = 0;
+ if (!gpu_dn) {
+		dev_notice(&pdev->dev, "gpu device node not found\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(gpu_dn,"num_of_pp",
+ &mpdata->cfg_pp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set max pp to default 6\n");
+ mpdata->cfg_pp = 6;
+ }
+ mpdata->scale_info.maxpp = mpdata->cfg_pp;
+ mpdata->maxpp_sysfs = mpdata->cfg_pp;
+ _dev_info(&pdev->dev, "max pp is %d\n", mpdata->scale_info.maxpp);
+
+ ret = of_property_read_u32(gpu_dn,"min_pp",
+ &mpdata->cfg_min_pp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set min pp to default 1\n");
+ mpdata->cfg_min_pp = 1;
+ }
+ mpdata->scale_info.minpp = mpdata->cfg_min_pp;
+ _dev_info(&pdev->dev, "min pp is %d\n", mpdata->scale_info.minpp);
+
+ ret = of_property_read_u32(gpu_dn,"min_clk",
+ &mpdata->cfg_min_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "set min clk default to 0\n");
+ mpdata->cfg_min_clock = 0;
+ }
+ mpdata->scale_info.minclk = mpdata->cfg_min_clock;
+ _dev_info(&pdev->dev, "min clk is %d\n", mpdata->scale_info.minclk);
+
+ mpdata->reg_base_hiubus = of_iomap(gpu_dn, 1);
+ _dev_info(&pdev->dev, "hiu io source 0x%p\n", mpdata->reg_base_hiubus);
+
+ mpdata->reg_base_aobus = of_iomap(gpu_dn, 2);
+	_dev_info(&pdev->dev, "aobus io source 0x%p\n", mpdata->reg_base_aobus);
+
+ ret = of_property_read_u32(gpu_dn,"sc_mpp",
+ &mpdata->sc_mpp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set pp used most of time default to %d\n", mpdata->cfg_pp);
+ mpdata->sc_mpp = mpdata->cfg_pp;
+ }
+ _dev_info(&pdev->dev, "num of pp used most of time %d\n", mpdata->sc_mpp);
+
+ of_get_property(gpu_dn, "tbl", &length);
+
+ length = length /sizeof(u32);
+ _dev_info(&pdev->dev, "clock dvfs cfg table size is %d\n", length);
+
+ mpdata->dvfs_table = devm_kzalloc(&pdev->dev,
+ sizeof(struct mali_dvfs_threshold_table)*length,
+ GFP_KERNEL);
+ dvfs_tbl = mpdata->dvfs_table;
+ if (mpdata->dvfs_table == NULL) {
+ dev_err(&pdev->dev, "failed to alloc dvfs table\n");
+ return -ENOMEM;
+ }
+ mpdata->clk_sample = devm_kzalloc(&pdev->dev, sizeof(u32)*length, GFP_KERNEL);
+ if (mpdata->clk_sample == NULL) {
+ dev_err(&pdev->dev, "failed to alloc clk_sample table\n");
+ return -ENOMEM;
+ }
+ clk_sample = mpdata->clk_sample;
+ of_property_for_each_u32(gpu_dn, "tbl", prop, p, u) {
+ dvfs_clk_hdl = (phandle) u;
+ gpu_clk_dn = of_find_node_by_phandle(dvfs_clk_hdl);
+ ret = of_property_read_u32(gpu_clk_dn,"clk_freq", &dvfs_tbl->clk_freq);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_freq failed\n");
+ }
+#if 0
+#ifdef MESON_CPU_VERSION_OPS
+ if (is_meson_gxbbm_cpu()) {
+ if (dvfs_tbl->clk_freq >= GXBBM_MAX_GPU_FREQ)
+ continue;
+ }
+#endif
+#endif
+#if 0
+ ret = of_property_read_string(gpu_clk_dn,"clk_parent",
+ &dvfs_tbl->clk_parent);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_parent failed\n");
+ }
+ dvfs_tbl->clkp_handle = devm_clk_get(&pdev->dev, dvfs_tbl->clk_parent);
+ if (IS_ERR(dvfs_tbl->clkp_handle)) {
+ dev_notice(&pdev->dev, "failed to get %s's clock pointer\n", dvfs_tbl->clk_parent);
+ }
+ ret = of_property_read_u32(gpu_clk_dn,"clkp_freq", &dvfs_tbl->clkp_freq);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_parent freq failed\n");
+ }
+#endif
+ ret = of_property_read_u32(gpu_clk_dn,"voltage", &dvfs_tbl->voltage);
+ if (ret) {
+ dev_notice(&pdev->dev, "read voltage failed\n");
+ }
+ ret = of_property_read_u32(gpu_clk_dn,"keep_count", &dvfs_tbl->keep_count);
+ if (ret) {
+ dev_notice(&pdev->dev, "read keep_count failed\n");
+ }
+ //downthreshold and upthreshold shall be u32
+ ret = of_property_read_u32_array(gpu_clk_dn,"threshold",
+ &dvfs_tbl->downthreshold, 2);
+ if (ret) {
+ dev_notice(&pdev->dev, "read threshold failed\n");
+ }
+ dvfs_tbl->freq_index = i;
+
+ *clk_sample = dvfs_tbl->clk_freq / 1000000;
+
+ dvfs_tbl ++;
+ clk_sample ++;
+ i++;
+ mpdata->dvfs_table_size ++;
+ }
+
+ ret = of_property_read_u32(gpu_dn,"max_clk",
+ &mpdata->cfg_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "max clk set %d\n", mpdata->dvfs_table_size-2);
+ mpdata->cfg_clock = mpdata->dvfs_table_size-2;
+ }
+
+ mpdata->cfg_clock_bkup = mpdata->cfg_clock;
+ mpdata->maxclk_sysfs = mpdata->cfg_clock;
+ mpdata->scale_info.maxclk = mpdata->cfg_clock;
+ _dev_info(&pdev->dev, "max clk is %d\n", mpdata->scale_info.maxclk);
+
+ ret = of_property_read_u32(gpu_dn,"turbo_clk",
+ &mpdata->turbo_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "turbo clk set to %d\n", mpdata->dvfs_table_size-1);
+ mpdata->turbo_clock = mpdata->dvfs_table_size-1;
+ }
+ _dev_info(&pdev->dev, "turbo clk is %d\n", mpdata->turbo_clock);
+
+ ret = of_property_read_u32(gpu_dn,"def_clk",
+ &mpdata->def_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "default clk set to %d\n", mpdata->dvfs_table_size/2-1);
+ mpdata->def_clock = mpdata->dvfs_table_size/2 - 1;
+ }
+ _dev_info(&pdev->dev, "default clk is %d\n", mpdata->def_clock);
+
+ dvfs_tbl = mpdata->dvfs_table;
+ clk_sample = mpdata->clk_sample;
+ for (i = 0; i< mpdata->dvfs_table_size; i++) {
+ _dev_info(&pdev->dev, "====================%d====================\n"
+			"clk_freq=%10d, clk_parent=%9s, voltage=%d, keep_count=%d, threshold=<%d %d>, clk_sample=%d\n",
+ i,
+ dvfs_tbl->clk_freq, dvfs_tbl->clk_parent,
+ dvfs_tbl->voltage, dvfs_tbl->keep_count,
+ dvfs_tbl->downthreshold, dvfs_tbl->upthreshold, *clk_sample);
+ dvfs_tbl ++;
+ clk_sample ++;
+ }
+ _dev_info(&pdev->dev, "clock dvfs table size is %d\n", mpdata->dvfs_table_size);
+
+ mpdata->clk_mali = devm_clk_get(&pdev->dev, "gpu_mux");
+#if 0
+ mpdata->clk_mali_0 = devm_clk_get(&pdev->dev, "clk_mali_0");
+ mpdata->clk_mali_1 = devm_clk_get(&pdev->dev, "clk_mali_1");
+#endif
+ if (IS_ERR(mpdata->clk_mali)) {
+ dev_err(&pdev->dev, "failed to get clock pointer\n");
+ return -EFAULT;
+ }
+
+ pmali_plat = mpdata;
+ mpdata->pdev = pdev;
+ return 0;
+}
+
+#endif
#include <linux/types.h>
#include <linux/workqueue.h>
-#include <linux/clk-private.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
enum mali_scale_mode_t {
MALI_PP_SCALING = 0,
#include <linux/module.h>
#include <linux/workqueue.h>
+#if AMLOGIC_GPU_USE_GPPLL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 16)
#include <linux/amlogic/amports/gp_pll.h>
+#else
+#include <linux/amlogic/media/clk/gp_pll.h>
+#endif
+#endif
+
#define LOG_MALI_SCALING 1
#include "meson_main2.h"
#include "mali_clock.h"
//static int scaling_mode = MALI_SCALING_DISABLE;
//static int scaling_mode = MALI_PP_SCALING;
+#if AMLOGIC_GPU_USE_GPPLL
static struct gp_pll_user_handle_s *gp_pll_user_gpu;
static int is_gp_pll_get;
static int is_gp_pll_put;
-
+#endif
static unsigned scaling_dbg_level = 0;
module_param(scaling_dbg_level, uint, 0644);
MODULE_PARM_DESC(scaling_dbg_level , "scaling debug level");
{
mali_dvfs_threshold_table * pdvfs = pmali_plat->dvfs_table;
uint32_t execStep = currentStep;
+#if AMLOGIC_GPU_USE_GPPLL
mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[currentStep];
+#endif
//if (pdvfs[currentStep].freq_index == pdvfs[lastStep].freq_index) return;
if ((pdvfs[execStep].freq_index == pdvfs[lastStep].freq_index) ||
return;
}
+#if AMLOGIC_GPU_USE_GPPLL
if (0 == strcmp(dvfs_tbl->clk_parent, "gp0_pll")) {
gp_pll_request(gp_pll_user_gpu);
if (!is_gp_pll_get) {
is_gp_pll_put = 0;
gp_pll_release(gp_pll_user_gpu);
}
-
+#endif
//mali_dev_pause();
mali_clock_set(pdvfs[execStep].freq_index);
//mali_dev_resume();
lastStep = execStep;
+#if AMLOGIC_GPU_USE_GPPLL
if (is_gp_pll_put) {
//printk("release gp0 pll\n");
gp_pll_release(gp_pll_user_gpu);
is_gp_pll_get = 0;
is_gp_pll_put = 0;
}
+#endif
}
+#if AMLOGIC_GPU_USE_GPPLL
static int gp_pll_user_cb_gpu(struct gp_pll_user_handle_s *user,
int event)
{
return 0;
}
+#endif
int mali_perf_set_num_pp_cores(int cores)
{
pmali_plat = mali_plat;
printk("mali_plat=%p\n", mali_plat);
num_cores_enabled = pmali_plat->sc_mpp;
+#if AMLOGIC_GPU_USE_GPPLL
gp_pll_user_gpu = gp_pll_user_register("gpu", 1,
gp_pll_user_cb_gpu);
//not get the gp pll, do need put
is_gp_pll_get = 0;
is_gp_pll_put = 0;
if (gp_pll_user_gpu == NULL) printk("register gp pll user for gpu failed\n");
+#endif
currentStep = pmali_plat->def_clock;
lastStep = currentStep;
{
#ifndef CONFIG_MALI_DVFS
flush_scheduled_work();
+#if AMLOGIC_GPU_USE_GPPLL
gp_pll_user_unregister(gp_pll_user_gpu);
#endif
+#endif
}
#ifndef CONFIG_MALI_DVFS
#
# This file is called by the Linux build system.
-############## Kasin Added, for platform. ################
-
-ifndef CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH
- ccflags-y += -DCONFIG_MALI_DMA_BUF_MAP_ON_ATTACH=y
-endif
-#ifeq ($(CONFIG_ARCH_MESON),y)
-#ccflags-y += -DCONFIG_MALI450=y
-#ifeq ($(CONFIG_MALI450),m)
-#ccflags-y += -DCONFIG_MALI450=y
-#endif
-#ifeq ($(CONFIG_MALI450),y)
-#ccflags-y += -DCONFIG_MALI450=y
-#endif
-ccflags-y += -DCONFIG_MALI_DT=y
-ccflags-y += -DMESON_CPU_TYPE=0x80
-ccflags-y += -DMESON_CPU_TYPE_MESON6=0x60
-ccflags-y += -DMESON_CPU_TYPE_MESON6TVD=0x75
-ccflags-y += -DMESON_CPU_TYPE_MESON8=0x80
-ccflags-y += -DMESON_CPU_TYPE_MESON8B=0x8B
-#endif
-
-##################### end Kasin Added. ###################
-
+include $(src)/Kbuild.amlogic
# set up defaults if not defined by the user
TIMESTAMP ?= default
ifeq ($(CONFIG_UMP), m)
--- /dev/null
+############## Kasin Added, for platform. ################
+
+ifndef CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH
+ ccflags-y += -DCONFIG_MALI_DMA_BUF_MAP_ON_ATTACH=y
+endif
+
+ccflags-y += -DCONFIG_MALI_DT=y
+ccflags-y += -DMESON_CPU_TYPE=0x80
+ccflags-y += -DMESON_CPU_TYPE_MESON6=0x60
+ccflags-y += -DMESON_CPU_TYPE_MESON6TVD=0x75
+ccflags-y += -DMESON_CPU_TYPE_MESON8=0x80
+ccflags-y += -DMESON_CPU_TYPE_MESON8B=0x8B
+
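+# AMLOGIC_GPU_USE_GPPLL gates the gp_pll code paths in the driver; it is
+# enabled when the Amlogic video/media stack is built in.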
+USE_GPPLL?=0
+ifdef CONFIG_AM_VIDEO
+ USE_GPPLL:=1
+endif
+
+ccflags-y += -DAMLOGIC_GPU_USE_GPPLL=$(USE_GPPLL)
#include <linux/file.h>
#include <linux/seq_file.h>
#include <linux/module.h>
+#include <asm-generic/fcntl.h>
struct mali_sync_pt {
struct sync_pt sync_pt;
{
s32 fd = -1;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
fd = get_unused_fd();
+#else
+ fd = get_unused_fd_flags(O_CLOEXEC);
+#endif
if (fd < 0) {
sync_fence_put(sync_fence);
return -1;
#include <linux/types.h>
#include <linux/workqueue.h>
-#include <linux/clk-private.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
enum mali_scale_mode_t {
MALI_PP_SCALING = 0,
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/clk-private.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/mali/mali_utgard.h>
#define AML_CLK_LOCK_ERROR 1
#endif
#define GXBBM_MAX_GPU_FREQ 700000000UL
-
+struct clk;
static unsigned gpu_dbg_level = 0;
module_param(gpu_dbg_level, uint, 0644);
MODULE_PARM_DESC(gpu_dbg_level, "gpu debug level");
printk("gpu_debug"fmt , ## arg); \
} while (0)
-#define GPU_CLK_DBG(fmt, arg...) \
- do { \
- gpu_dbg(1, "line(%d), clk_cntl=0x%08x\n" fmt, __LINE__, mplt_read(HHI_MALI_CLK_CNTL), ## arg);\
- } while (0)
+#define GPU_CLK_DBG(fmt, arg...)
//disable print
#define _dev_info(...)
struct timeval start;
struct timeval end;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 16))
int mali_clock_init_clk_tree(struct platform_device* pdev)
{
mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[pmali_plat->def_clock];
mpdata->pdev = pdev;
return 0;
}
+#else
+int mali_clock_init_clk_tree(struct platform_device* pdev)
+{
+ //mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[pmali_plat->def_clock];
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ clk_prepare_enable(clk_mali);
+
+ return 0;
+}
+
+int mali_clock_init(mali_plat_info_t *pdev)
+{
+ *pdev = *pdev;
+ return 0;
+}
+
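+/* Run the supplied clock-change callback and return its result. */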
+int mali_clock_critical(critical_t critical, size_t param)
+{
+ int ret = 0;
+
+ ret = critical(param);
+
+ return ret;
+}
+
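+/* Switch the Mali clock to the DVFS table entry given by 'param' and time the clk_set_rate() call. */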
+static int critical_clock_set(size_t param)
+{
+ int ret = 0;
+ unsigned int idx = param;
+ mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[idx];
+
+ struct clk *clk_mali = pmali_plat->clk_mali;
+	unsigned long time_use = 0;
+
+ GPU_CLK_DBG();
+ do_gettimeofday(&start);
+ ret = clk_set_rate(clk_mali, dvfs_tbl->clk_freq);
+ do_gettimeofday(&end);
+ GPU_CLK_DBG();
+
+#ifndef AML_CLK_LOCK_ERROR
+ clk_disable_unprepare(clk_mali_x_old);
+#endif
+ time_use = (end.tv_sec - start.tv_sec)*1000000 + end.tv_usec - start.tv_usec;
+ GPU_CLK_DBG("step 1, mali_mux use: %ld us\n", time_use);
+
+ return 0;
+}
+
+int mali_clock_set(unsigned int clock)
+{
+ return mali_clock_critical(critical_clock_set, (size_t)clock);
+}
+
+void disable_clock(void)
+{
+#ifndef AML_CLK_LOCK_ERROR
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ GPU_CLK_DBG();
+ clk_disable_unprepare(clk_mali);
+#endif
+ GPU_CLK_DBG();
+}
+
+void enable_clock(void)
+{
+#ifndef AML_CLK_LOCK_ERROR
+ struct clk *clk_mali = pmali_plat->clk_mali;
+
+ clk_prepare_enable(clk_mali);
+#endif
+ GPU_CLK_DBG();
+}
+
+u32 get_mali_freq(u32 idx)
+{
+ if (!mali_pm_statue) {
+ return pmali_plat->clk_sample[idx];
+ } else {
+ return 0;
+ }
+}
+
+void set_str_src(u32 data)
+{
+ printk("gpu: %s, %s, %d\n", __FILE__, __func__, __LINE__);
+}
+
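+/* Parse the GPU device-tree node: PP core limits, register ranges and the
+ * DVFS table referenced by the "tbl" property, then grab the "gpu_mux" clock. */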
+int mali_dt_info(struct platform_device *pdev, struct mali_plat_info_t *mpdata)
+{
+ struct device_node *gpu_dn = pdev->dev.of_node;
+ struct device_node *gpu_clk_dn;
+ struct mali_gpu_clk_item *clk_item;
+ phandle dvfs_clk_hdl;
+ mali_dvfs_threshold_table *dvfs_tbl = NULL;
+ uint32_t *clk_sample = NULL;
+
+ struct property *prop;
+ const __be32 *p;
+ int length = 0, i = 0;
+ u32 u;
+
+ int ret = 0;
+ if (!gpu_dn) {
+		dev_notice(&pdev->dev, "gpu device node not found\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(gpu_dn,"num_of_pp",
+ &mpdata->cfg_pp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set max pp to default 6\n");
+ mpdata->cfg_pp = 6;
+ }
+ mpdata->scale_info.maxpp = mpdata->cfg_pp;
+ mpdata->maxpp_sysfs = mpdata->cfg_pp;
+ _dev_info(&pdev->dev, "max pp is %d\n", mpdata->scale_info.maxpp);
+
+ ret = of_property_read_u32(gpu_dn,"min_pp",
+ &mpdata->cfg_min_pp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set min pp to default 1\n");
+ mpdata->cfg_min_pp = 1;
+ }
+ mpdata->scale_info.minpp = mpdata->cfg_min_pp;
+ _dev_info(&pdev->dev, "min pp is %d\n", mpdata->scale_info.minpp);
+
+ ret = of_property_read_u32(gpu_dn,"min_clk",
+ &mpdata->cfg_min_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "set min clk default to 0\n");
+ mpdata->cfg_min_clock = 0;
+ }
+ mpdata->scale_info.minclk = mpdata->cfg_min_clock;
+ _dev_info(&pdev->dev, "min clk is %d\n", mpdata->scale_info.minclk);
+
+ mpdata->reg_base_hiubus = of_iomap(gpu_dn, 1);
+ _dev_info(&pdev->dev, "hiu io source 0x%p\n", mpdata->reg_base_hiubus);
+
+ mpdata->reg_base_aobus = of_iomap(gpu_dn, 2);
+ _dev_info(&pdev->dev, "hiu io source 0x%p\n", mpdata->reg_base_aobus);
+
+ ret = of_property_read_u32(gpu_dn,"sc_mpp",
+ &mpdata->sc_mpp);
+ if (ret) {
+ dev_notice(&pdev->dev, "set pp used most of time default to %d\n", mpdata->cfg_pp);
+ mpdata->sc_mpp = mpdata->cfg_pp;
+ }
+ _dev_info(&pdev->dev, "num of pp used most of time %d\n", mpdata->sc_mpp);
+
+ of_get_property(gpu_dn, "tbl", &length);
+
+ length = length /sizeof(u32);
+ _dev_info(&pdev->dev, "clock dvfs cfg table size is %d\n", length);
+
+ mpdata->dvfs_table = devm_kzalloc(&pdev->dev,
+ sizeof(struct mali_dvfs_threshold_table)*length,
+ GFP_KERNEL);
+ dvfs_tbl = mpdata->dvfs_table;
+ if (mpdata->dvfs_table == NULL) {
+ dev_err(&pdev->dev, "failed to alloc dvfs table\n");
+ return -ENOMEM;
+ }
+ mpdata->clk_sample = devm_kzalloc(&pdev->dev, sizeof(u32)*length, GFP_KERNEL);
+ if (mpdata->clk_sample == NULL) {
+ dev_err(&pdev->dev, "failed to alloc clk_sample table\n");
+ return -ENOMEM;
+ }
+ clk_sample = mpdata->clk_sample;
+///////////
+ mpdata->clk_items = devm_kzalloc(&pdev->dev, sizeof(struct mali_gpu_clk_item) * length, GFP_KERNEL);
+ if (mpdata->clk_items == NULL) {
+ dev_err(&pdev->dev, "failed to alloc clk_item table\n");
+ return -ENOMEM;
+ }
+ clk_item = mpdata->clk_items;
+//
+ of_property_for_each_u32(gpu_dn, "tbl", prop, p, u) {
+ dvfs_clk_hdl = (phandle) u;
+ gpu_clk_dn = of_find_node_by_phandle(dvfs_clk_hdl);
+ ret = of_property_read_u32(gpu_clk_dn,"clk_freq", &dvfs_tbl->clk_freq);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_freq failed\n");
+ }
+#if 0
+#ifdef MESON_CPU_VERSION_OPS
+ if (is_meson_gxbbm_cpu()) {
+ if (dvfs_tbl->clk_freq >= GXBBM_MAX_GPU_FREQ)
+ continue;
+ }
+#endif
+#endif
+#if 0
+ ret = of_property_read_string(gpu_clk_dn,"clk_parent",
+ &dvfs_tbl->clk_parent);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_parent failed\n");
+ }
+ dvfs_tbl->clkp_handle = devm_clk_get(&pdev->dev, dvfs_tbl->clk_parent);
+ if (IS_ERR(dvfs_tbl->clkp_handle)) {
+ dev_notice(&pdev->dev, "failed to get %s's clock pointer\n", dvfs_tbl->clk_parent);
+ }
+ ret = of_property_read_u32(gpu_clk_dn,"clkp_freq", &dvfs_tbl->clkp_freq);
+ if (ret) {
+ dev_notice(&pdev->dev, "read clk_parent freq failed\n");
+ }
+#endif
+ ret = of_property_read_u32(gpu_clk_dn,"voltage", &dvfs_tbl->voltage);
+ if (ret) {
+ dev_notice(&pdev->dev, "read voltage failed\n");
+ }
+ ret = of_property_read_u32(gpu_clk_dn,"keep_count", &dvfs_tbl->keep_count);
+ if (ret) {
+ dev_notice(&pdev->dev, "read keep_count failed\n");
+ }
+ //downthreshold and upthreshold must be adjacent u32 fields; one array read fills both
+ ret = of_property_read_u32_array(gpu_clk_dn,"threshold",
+ &dvfs_tbl->downthreshold, 2);
+ if (ret) {
+ dev_notice(&pdev->dev, "read threshold failed\n");
+ }
+ dvfs_tbl->freq_index = i;
+ clk_item->clock = dvfs_tbl->clk_freq / 1000000;
+ clk_item->vol = dvfs_tbl->voltage;
+
+ *clk_sample = dvfs_tbl->clk_freq / 1000000;
+
+ dvfs_tbl ++;
+ clk_item ++;
+ clk_sample ++;
+ i++;
+ mpdata->dvfs_table_size ++;
+ }
+
+ ret = of_property_read_u32(gpu_dn,"max_clk",
+ &mpdata->cfg_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "max clk set %d\n", mpdata->dvfs_table_size-2);
+ mpdata->cfg_clock = mpdata->dvfs_table_size-2;
+ }
+
+ mpdata->cfg_clock_bkup = mpdata->cfg_clock;
+ mpdata->maxclk_sysfs = mpdata->cfg_clock;
+ mpdata->scale_info.maxclk = mpdata->cfg_clock;
+ _dev_info(&pdev->dev, "max clk is %d\n", mpdata->scale_info.maxclk);
+
+ ret = of_property_read_u32(gpu_dn,"turbo_clk",
+ &mpdata->turbo_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "turbo clk set to %d\n", mpdata->dvfs_table_size-1);
+ mpdata->turbo_clock = mpdata->dvfs_table_size-1;
+ }
+ _dev_info(&pdev->dev, "turbo clk is %d\n", mpdata->turbo_clock);
+
+ ret = of_property_read_u32(gpu_dn,"def_clk",
+ &mpdata->def_clock);
+ if (ret) {
+ dev_notice(&pdev->dev, "default clk set to %d\n", mpdata->dvfs_table_size/2-1);
+ mpdata->def_clock = mpdata->dvfs_table_size/2 - 1;
+ }
+ _dev_info(&pdev->dev, "default clk is %d\n", mpdata->def_clock);
+
+ dvfs_tbl = mpdata->dvfs_table;
+ clk_sample = mpdata->clk_sample;
+ for (i = 0; i< mpdata->dvfs_table_size; i++) {
+ _dev_info(&pdev->dev, "====================%d====================\n"
+ "clk_freq=%10d, clk_parent=%9s, voltage=%d, keep_count=%d, threshod=<%d %d>, clk_sample=%d\n",
+ i,
+ dvfs_tbl->clk_freq, dvfs_tbl->clk_parent,
+ dvfs_tbl->voltage, dvfs_tbl->keep_count,
+ dvfs_tbl->downthreshold, dvfs_tbl->upthreshold, *clk_sample);
+ dvfs_tbl ++;
+ clk_sample ++;
+ }
+ _dev_info(&pdev->dev, "clock dvfs table size is %d\n", mpdata->dvfs_table_size);
+
+ mpdata->clk_mali = devm_clk_get(&pdev->dev, "gpu_mux");
+#if 0
+ mpdata->clk_mali_0 = devm_clk_get(&pdev->dev, "clk_mali_0");
+ mpdata->clk_mali_1 = devm_clk_get(&pdev->dev, "clk_mali_1");
+#endif
+ if (IS_ERR(mpdata->clk_mali)) {
+ dev_err(&pdev->dev, "failed to get clock pointer\n");
+ return -EFAULT;
+ }
+
+ pmali_plat = mpdata;
+ mpdata->pdev = pdev;
+ return 0;
+}
+
+#endif
#include <mali_osk_profiling.h>
#include <linux/time.h>
-#include <linux/amlogic/amports/gp_pll.h>
+//#include <linux/amlogic/amports/gp_pll.h>
#include "meson_main2.h"
#include <mali_kernel_common.h>
#include <mali_osk_profiling.h>
+#if AMLOGIC_GPU_USE_GPPLL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 16)
#include <linux/amlogic/amports/gp_pll.h>
+#else
+#include <linux/amlogic/media/clk/gp_pll.h>
+#endif
+#endif
#define LOG_MALI_SCALING 1
#include "meson_main2.h"
#include "mali_clock.h"
//static int scaling_mode = MALI_SCALING_DISABLE;
//static int scaling_mode = MALI_PP_SCALING;
+#if AMLOGIC_GPU_USE_GPPLL
static struct gp_pll_user_handle_s *gp_pll_user_gpu;
static int is_gp_pll_get;
static int is_gp_pll_put;
+#endif
static unsigned scaling_dbg_level = 0;
module_param(scaling_dbg_level, uint, 0644);
{
mali_dvfs_threshold_table * pdvfs = pmali_plat->dvfs_table;
uint32_t execStep = currentStep;
+#if AMLOGIC_GPU_USE_GPPLL
mali_dvfs_threshold_table *dvfs_tbl = &pmali_plat->dvfs_table[currentStep];
+#endif
//if (pdvfs[currentStep].freq_index == pdvfs[lastStep].freq_index) return;
if ((pdvfs[execStep].freq_index == pdvfs[lastStep].freq_index) ||
return;
}
+#if AMLOGIC_GPU_USE_GPPLL
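+ /*
+  * gp0_pll is a shared PLL: request it before switching to a DVFS step
+  * parented on it, and release it again further down once the switch
+  * away is done (tracked via is_gp_pll_get / is_gp_pll_put).
+  */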
if (0 == strcmp(dvfs_tbl->clk_parent, "gp0_pll")) {
gp_pll_request(gp_pll_user_gpu);
if (!is_gp_pll_get) {
is_gp_pll_put = 0;
gp_pll_release(gp_pll_user_gpu);
}
+#endif
//mali_dev_pause();
mali_clock_set(pdvfs[execStep].freq_index);
//mali_dev_resume();
lastStep = execStep;
+#if AMLOGIC_GPU_USE_GPPLL
if (is_gp_pll_put) {
//printk("release gp0 pll\n");
gp_pll_release(gp_pll_user_gpu);
is_gp_pll_get = 0;
is_gp_pll_put = 0;
}
+#endif
}
+#if AMLOGIC_GPU_USE_GPPLL
static int gp_pll_user_cb_gpu(struct gp_pll_user_handle_s *user,
int event)
{
return 0;
}
+#endif
static void do_scaling(struct work_struct *work)
{
pmali_plat = mali_plat;
num_cores_enabled = pmali_plat->sc_mpp;
+#if AMLOGIC_GPU_USE_GPPLL
gp_pll_user_gpu = gp_pll_user_register("gpu", 1,
gp_pll_user_cb_gpu);
 //gp pll not acquired yet, so no need to put it
is_gp_pll_get = 0;
is_gp_pll_put = 0;
if (gp_pll_user_gpu == NULL) printk("register gp pll user for gpu failed\n");
+#endif
currentStep = pmali_plat->def_clock;
lastStep = currentStep;
{
#ifndef CONFIG_MALI_DVFS
flush_scheduled_work();
+#if AMLOGIC_GPU_USE_GPPLL
gp_pll_user_unregister(gp_pll_user_gpu);
#endif
+#endif
}
#ifndef CONFIG_MALI_DVFS