mutex_init(&gsMutex);
#if 0
- ghLogBuf = ged_log_buf_alloc(320, 64, GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE, NULL, "profile_dvfs");
+ ghLogBuf = ged_log_buf_alloc(320, 64 * 320, GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE, NULL, "profile_dvfs");
#endif
return GED_OK;
mutex_lock(&gsMutex);
- if (NULL == ghLogBuf)
+ if (0 == ghLogBuf)
{
- ghLogBuf = ged_log_buf_alloc(320, 64, GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE, NULL, "profile_dvfs");
+ ghLogBuf = ged_log_buf_alloc(320, 64 * 320, GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE, NULL, "profile_dvfs");
}
ret = ghLogBuf ? GED_OK : GED_ERROR_FAIL;
{
mutex_lock(&gsMutex);
- if (NULL != ghLogBuf)
+ if (0 != ghLogBuf)
{
ged_log_buf_free(ghLogBuf);
- ghLogBuf = NULL;
+ ghLogBuf = 0;
}
mutex_unlock(&gsMutex);
unsigned long long t;
unsigned long nanosec_rem;
- t = cpu_clock(smp_processor_id());
+ t = ged_get_time();
nanosec_rem = do_div(t, 1000000000) / 1000;
ged_log_buf_print(ghLogBuf, "%5lu.%06lu,freq_volt,%u,%u", (unsigned long) t, nanosec_rem, ui32Frequency, ui32Voltage);
unsigned long long t;
unsigned long nanosec_rem;
- t = cpu_clock(smp_processor_id());
+ t = ged_get_time();
nanosec_rem = do_div(t, 1000000000) / 1000;
ged_log_buf_print(ghLogBuf, "%5lu.%06lu,temp,%d", (unsigned long) t, nanosec_rem, i32Temp);
unsigned long long t;
unsigned long nanosec_rem;
- t = cpu_clock(smp_processor_id());
+ t = ged_get_time();
nanosec_rem = do_div(t, 1000000000) / 1000;
ged_log_buf_print(ghLogBuf, "%5lu.%06lu,thermal_limit,%u", (unsigned long) t, nanosec_rem, ui32FreqLimit);
unsigned long long t;
unsigned long nanosec_rem;
- t = cpu_clock(smp_processor_id());
+ t = ged_get_time();
nanosec_rem = do_div(t, 1000000000) / 1000;
ged_log_buf_print(ghLogBuf, "%5lu.%06lu,gpu_load,%u", (unsigned long) t, nanosec_rem, ui32GpuLoading);
unsigned long long t;
unsigned long nanosec_rem;
- t = cpu_clock(smp_processor_id());
+ t = ged_get_time();
nanosec_rem = do_div(t, 1000000000) / 1000;
ged_log_buf_print(ghLogBuf, "%5lu.%06lu,gpu_clock,1", (unsigned long) t, nanosec_rem);
unsigned long long t;
unsigned long nanosec_rem;
- t = cpu_clock(smp_processor_id());
+ t = ged_get_time();
nanosec_rem = do_div(t, 1000000000) / 1000;
ged_log_buf_print(ghLogBuf, "%5lu.%06lu,gpu_clock,0", (unsigned long) t, nanosec_rem);
mutex_unlock(&gsMutex);
}
+
+/* Record one "SW_vsync" event into the DVFS profiling log buffer as a
+ * CSV line: "<sec>.<usec>,SW_vsync,<timestamp>,<phase>,<fence_done>".
+ * No-op unless the log buffer exists and recording is enabled.
+ * NOTE(review): parameter semantics inferred from names — ulTimeStamp is
+ * presumably the SW vsync timestamp and ul3DFenceDoneTime the time the 3D
+ * fence signalled; confirm against the caller. */
+void ged_profile_dvfs_record_SW_vsync(unsigned long ulTimeStamp, long lPhase, unsigned long ul3DFenceDoneTime)
+{
+ mutex_lock(&gsMutex);
+
+ if (ghLogBuf && gbAllowRecord)
+ {
+ /* copy & modify from ./kernel/printk.c */
+ unsigned long long t;
+ unsigned long nanosec_rem;
+
+ t = ged_get_time();
+ /* split the ns counter: t becomes whole seconds, remainder -> microseconds
+ * (matches the "%5lu.%06lu" seconds.usec prefix below) */
+ nanosec_rem = do_div(t, 1000000000) / 1000;
+
+ ged_log_buf_print(ghLogBuf, "%5lu.%06lu,SW_vsync,%lu,%ld,%lu", (unsigned long) t, nanosec_rem, ulTimeStamp, lPhase, ul3DFenceDoneTime);
+ }
+
+ mutex_unlock(&gsMutex);
+}
+
+/* Record one DVFS policy decision into the profiling log buffer as a
+ * "<sec>.<usec>,Freq=..,Load=..,PreT1=..,PreF=..,t0=..,CurF=..,t1=..,phase=.."
+ * line. No-op unless the log buffer exists and recording is enabled.
+ * NOTE(review): lFreq/ulPreFreq/ulCurFreq look like target, previous and
+ * current GPU frequencies and t0/t1/lPreT1 like policy timing samples —
+ * inferred from names only; verify against the DVFS policy caller. */
+void ged_profile_dvfs_record_policy(
+ long lFreq, unsigned int ui32GpuLoading, long lPreT1, unsigned long ulPreFreq, long t0, unsigned long ulCurFreq, long t1, long lPhase)
+{
+ mutex_lock(&gsMutex);
+
+ if (ghLogBuf && gbAllowRecord)
+ {
+ /* copy & modify from ./kernel/printk.c */
+ unsigned long long t;
+ unsigned long nanosec_rem;
+
+ t = ged_get_time();
+ /* split the ns counter: t becomes whole seconds, remainder -> microseconds
+ * (matches the "%5lu.%06lu" seconds.usec prefix below) */
+ nanosec_rem = do_div(t, 1000000000) / 1000;
+
+ ged_log_buf_print(ghLogBuf, "%5lu.%06lu,Freq=%ld,Load=%u,PreT1=%ld,PreF=%lu,t0=%ld,CurF=%lu,t1=%ld,phase=%ld", (unsigned long) t, nanosec_rem, lFreq, ui32GpuLoading, lPreT1, ulPreFreq, t0, ulCurFreq, t1, lPhase);
+ }
+
+ mutex_unlock(&gsMutex);
+
+}
+