source: G950FXXS5DSI1
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] / drivers / gpu / arm / b_r16p0 / platform / exynos / gpu_custom_interface.c
1 /* drivers/gpu/arm/.../platform/gpu_custom_interface.c
2 *
3 * Copyright 2011 by S.LSI. Samsung Electronics Inc.
4 * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
5 *
6 * Samsung SoC Mali-T Series DVFS driver
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
11 */
12
13 /**
14 * @file gpu_custom_interface.c
15 * DVFS
16 */
17
18 #include <mali_kbase.h>
19
20 #include <linux/fb.h>
21
22 #if defined(CONFIG_MALI_DVFS) && defined(CONFIG_EXYNOS_THERMAL) && defined(CONFIG_GPU_THERMAL)
23 #include "exynos_tmu.h"
24 #endif
25
26 #include "mali_kbase_platform.h"
27 #include "gpu_dvfs_handler.h"
28 #include "gpu_dvfs_governor.h"
29 #include "gpu_control.h"
30 #ifdef CONFIG_CPU_THERMAL_IPA
31 #include "gpu_ipa.h"
32 #endif /* CONFIG_CPU_THERMAL_IPA */
33 #include "gpu_custom_interface.h"
34
35 #ifdef CONFIG_MALI_RT_PM
36 #include <soc/samsung/exynos-pd.h>
37 #endif
38
39 extern struct kbase_device *pkbdev;
40
int gpu_pmqos_dvfs_min_lock(int level)
{
#ifdef CONFIG_MALI_DVFS
	/* Apply (or drop) the PM-QoS driven DVFS minimum-frequency lock. */
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
	int freq;

	if (platform == NULL) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: platform context is not initialized\n", __func__);
		return -ENODEV;
	}

	/* A negative lookup result means "no such level": release the lock. */
	freq = gpu_dvfs_get_clock(level);
	if (freq >= 0)
		gpu_dvfs_clock_lock(GPU_DVFS_MIN_LOCK, PMQOS_LOCK, freq);
	else
		gpu_dvfs_clock_lock(GPU_DVFS_MIN_UNLOCK, PMQOS_LOCK, 0);
#endif /* CONFIG_MALI_DVFS */
	return 0;
}
60
61 static ssize_t show_clock(struct device *dev, struct device_attribute *attr, char *buf)
62 {
63 ssize_t ret = 0;
64 int clock = 0;
65 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
66
67 if (!platform)
68 return -ENODEV;
69
70 #ifdef CONFIG_MALI_RT_PM
71 if (platform->exynos_pm_domain) {
72 mutex_lock(&platform->exynos_pm_domain->access_lock);
73 if(!platform->dvs_is_enabled && gpu_is_power_on())
74 clock = gpu_get_cur_clock(platform);
75 mutex_unlock(&platform->exynos_pm_domain->access_lock);
76 }
77 #else
78 if (gpu_control_is_power_on(pkbdev) == 1) {
79 mutex_lock(&platform->gpu_clock_lock);
80 if (!platform->dvs_is_enabled)
81 clock = gpu_get_cur_clock(platform);
82 mutex_unlock(&platform->gpu_clock_lock);
83 }
84 #endif
85
86 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", clock);
87
88 if (ret < PAGE_SIZE - 1) {
89 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
90 } else {
91 buf[PAGE_SIZE-2] = '\n';
92 buf[PAGE_SIZE-1] = '\0';
93 ret = PAGE_SIZE-1;
94 }
95
96 return ret;
97 }
98
99 static ssize_t set_clock(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
100 {
101 unsigned int clk = 0;
102 int ret, i, policy_count;
103 static bool cur_state;
104 const struct kbase_pm_policy *const *policy_list;
105 static const struct kbase_pm_policy *prev_policy;
106 static bool prev_tmu_status = true;
107 #ifdef CONFIG_MALI_DVFS
108 static bool prev_dvfs_status = true;
109 #endif /* CONFIG_MALI_DVFS */
110 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
111
112 if (!platform)
113 return -ENODEV;
114
115 ret = kstrtoint(buf, 0, &clk);
116 if (ret) {
117 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
118 return -ENOENT;
119 }
120
121 if (!cur_state) {
122 prev_tmu_status = platform->tmu_status;
123 #ifdef CONFIG_MALI_DVFS
124 prev_dvfs_status = platform->dvfs_status;
125 #endif /* CONFIG_MALI_DVFS */
126 prev_policy = kbase_pm_get_policy(pkbdev);
127 }
128
129 if (clk == 0) {
130 kbase_pm_set_policy(pkbdev, prev_policy);
131 platform->tmu_status = prev_tmu_status;
132 #ifdef CONFIG_MALI_DVFS
133 if (!platform->dvfs_status)
134 gpu_dvfs_on_off(true);
135 #endif /* CONFIG_MALI_DVFS */
136 cur_state = false;
137 } else {
138 policy_count = kbase_pm_list_policies(&policy_list);
139 for (i = 0; i < policy_count; i++) {
140 if (sysfs_streq(policy_list[i]->name, "always_on")) {
141 kbase_pm_set_policy(pkbdev, policy_list[i]);
142 break;
143 }
144 }
145 platform->tmu_status = false;
146 #ifdef CONFIG_MALI_DVFS
147 if (platform->dvfs_status)
148 gpu_dvfs_on_off(false);
149 #endif /* CONFIG_MALI_DVFS */
150 gpu_set_target_clk_vol(clk, false);
151 cur_state = true;
152 }
153
154 return count;
155 }
156
157 static ssize_t show_vol(struct device *dev, struct device_attribute *attr, char *buf)
158 {
159 ssize_t ret = 0;
160 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
161
162 if (!platform)
163 return -ENODEV;
164
165 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", gpu_get_cur_voltage(platform));
166
167 if (ret < PAGE_SIZE - 1) {
168 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
169 } else {
170 buf[PAGE_SIZE-2] = '\n';
171 buf[PAGE_SIZE-1] = '\0';
172 ret = PAGE_SIZE-1;
173 }
174
175 return ret;
176 }
177
178 static ssize_t show_power_state(struct device *dev, struct device_attribute *attr, char *buf)
179 {
180 ssize_t ret = 0;
181 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
182
183 if (!platform)
184 return -ENODEV;
185
186 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", gpu_control_is_power_on(pkbdev));
187
188 if (ret < PAGE_SIZE - 1) {
189 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
190 } else {
191 buf[PAGE_SIZE-2] = '\n';
192 buf[PAGE_SIZE-1] = '\0';
193 ret = PAGE_SIZE-1;
194 }
195
196 return ret;
197 }
198
199 static int gpu_get_asv_table(struct exynos_context *platform, char *buf, size_t buf_size)
200 {
201 int i, cnt = 0;
202
203 if (!platform)
204 return -ENODEV;
205
206 if (buf == NULL)
207 return 0;
208
209 cnt += snprintf(buf+cnt, buf_size-cnt, "GPU, vol, min, max, down_stay, mif, cpu0, cpu1\n");
210
211 for (i = gpu_dvfs_get_level(platform->gpu_max_clock); i <= gpu_dvfs_get_level(platform->gpu_min_clock); i++) {
212 cnt += snprintf(buf+cnt, buf_size-cnt, "%d, %7d, %2d, %3d, %d, %7d, %7d, %7d\n",
213 platform->table[i].clock, platform->table[i].voltage, platform->table[i].min_threshold,
214 platform->table[i].max_threshold, platform->table[i].down_staycount, platform->table[i].mem_freq,
215 platform->table[i].cpu_little_min_freq, platform->table[i].cpu_middle_min_freq);
216 }
217
218 return cnt;
219 }
220
221 static ssize_t show_asv_table(struct device *dev, struct device_attribute *attr, char *buf)
222 {
223 ssize_t ret = 0;
224 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
225
226 if (!platform)
227 return -ENODEV;
228
229 ret += gpu_get_asv_table(platform, buf+ret, (size_t)PAGE_SIZE-ret);
230
231 if (ret < PAGE_SIZE - 1) {
232 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
233 } else {
234 buf[PAGE_SIZE-2] = '\n';
235 buf[PAGE_SIZE-1] = '\0';
236 ret = PAGE_SIZE-1;
237 }
238
239 return ret;
240 }
241
242 static int gpu_get_dvfs_table(struct exynos_context *platform, char *buf, size_t buf_size)
243 {
244 int i, cnt = 0;
245
246 if (!platform)
247 return -ENODEV;
248
249 if (buf == NULL)
250 return 0;
251
252 for (i = gpu_dvfs_get_level(platform->gpu_max_clock); i <= gpu_dvfs_get_level(platform->gpu_min_clock); i++)
253 cnt += snprintf(buf+cnt, buf_size-cnt, " %d", platform->table[i].clock);
254
255 cnt += snprintf(buf+cnt, buf_size-cnt, "\n");
256
257 return cnt;
258 }
259
260 static ssize_t show_dvfs_table(struct device *dev, struct device_attribute *attr, char *buf)
261 {
262 ssize_t ret = 0;
263 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
264
265 if (!platform)
266 return -ENODEV;
267
268 ret += gpu_get_dvfs_table(platform, buf+ret, (size_t)PAGE_SIZE-ret);
269
270 if (ret < PAGE_SIZE - 1) {
271 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
272 } else {
273 buf[PAGE_SIZE-2] = '\n';
274 buf[PAGE_SIZE-1] = '\0';
275 ret = PAGE_SIZE-1;
276 }
277
278 return ret;
279 }
280
281 static ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr, char *buf)
282 {
283 ssize_t ret = 0;
284 int i;
285 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
286
287 if (!platform)
288 return -ENODEV;
289
290 gpu_dvfs_update_time_in_state(gpu_control_is_power_on(pkbdev) * platform->cur_clock);
291
292 for (i = gpu_dvfs_get_level(platform->gpu_min_clock); i >= gpu_dvfs_get_level(platform->gpu_max_clock); i--) {
293 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d %llu\n",
294 platform->table[i].clock,
295 platform->table[i].time);
296 }
297
298 if (ret >= PAGE_SIZE - 1) {
299 buf[PAGE_SIZE-2] = '\n';
300 buf[PAGE_SIZE-1] = '\0';
301 ret = PAGE_SIZE-1;
302 }
303
304 return ret;
305 }
306
static ssize_t set_time_in_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	/* sysfs: any write resets the time-in-state accounting. */
	gpu_dvfs_init_time_in_state();

	return count;
}
313
314 static ssize_t show_utilization(struct device *dev, struct device_attribute *attr, char *buf)
315 {
316 ssize_t ret = 0;
317 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
318
319 if (!platform)
320 return -ENODEV;
321
322 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", gpu_control_is_power_on(pkbdev) * platform->env_data.utilization);
323
324 if (ret < PAGE_SIZE - 1) {
325 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
326 } else {
327 buf[PAGE_SIZE-2] = '\n';
328 buf[PAGE_SIZE-1] = '\0';
329 ret = PAGE_SIZE-1;
330 }
331
332 return ret;
333 }
334
335 static ssize_t show_perf(struct device *dev, struct device_attribute *attr, char *buf)
336 {
337 ssize_t ret = 0;
338 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
339
340 if (!platform)
341 return -ENODEV;
342
343 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", gpu_control_is_power_on(pkbdev) * platform->env_data.perf);
344
345 if (ret < PAGE_SIZE - 1) {
346 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
347 } else {
348 buf[PAGE_SIZE-2] = '\n';
349 buf[PAGE_SIZE-1] = '\0';
350 ret = PAGE_SIZE-1;
351 }
352
353 return ret;
354 }
355
356 #ifdef CONFIG_MALI_DVFS
357 static ssize_t show_dvfs(struct device *dev, struct device_attribute *attr, char *buf)
358 {
359 ssize_t ret = 0;
360 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
361
362 if (!platform)
363 return -ENODEV;
364
365 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->dvfs_status);
366
367 if (ret < PAGE_SIZE - 1) {
368 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
369 } else {
370 buf[PAGE_SIZE-2] = '\n';
371 buf[PAGE_SIZE-1] = '\0';
372 ret = PAGE_SIZE-1;
373 }
374
375 return ret;
376 }
377
378 static ssize_t set_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
379 {
380 if (sysfs_streq("0", buf))
381 gpu_dvfs_on_off(false);
382 else if (sysfs_streq("1", buf))
383 gpu_dvfs_on_off(true);
384
385 return count;
386 }
387
388 static ssize_t show_governor(struct device *dev, struct device_attribute *attr, char *buf)
389 {
390 ssize_t ret = 0;
391 gpu_dvfs_governor_info *governor_info;
392 int i;
393 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
394
395 if (!platform)
396 return -ENODEV;
397
398 governor_info = (gpu_dvfs_governor_info *)gpu_dvfs_get_governor_info();
399
400 for (i = 0; i < G3D_MAX_GOVERNOR_NUM; i++)
401 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%s\n", governor_info[i].name);
402
403 ret += snprintf(buf+ret, PAGE_SIZE-ret, "[Current Governor] %s", governor_info[platform->governor_type].name);
404
405 if (ret < PAGE_SIZE - 1) {
406 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
407 } else {
408 buf[PAGE_SIZE-2] = '\n';
409 buf[PAGE_SIZE-1] = '\0';
410 ret = PAGE_SIZE-1;
411 }
412
413 return ret;
414 }
415
416 static ssize_t set_governor(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
417 {
418 int ret;
419 int next_governor_type;
420 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
421
422 if (!platform)
423 return -ENODEV;
424
425 ret = kstrtoint(buf, 0, &next_governor_type);
426
427 if ((next_governor_type < 0) || (next_governor_type >= G3D_MAX_GOVERNOR_NUM)) {
428 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
429 return -ENOENT;
430 }
431
432 ret = gpu_dvfs_governor_change(next_governor_type);
433
434 if (ret < 0) {
435 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u,
436 "%s: fail to set the new governor (%d)\n", __func__, next_governor_type);
437 return -ENOENT;
438 }
439
440 return count;
441 }
442
443 static ssize_t show_max_lock_status(struct device *dev, struct device_attribute *attr, char *buf)
444 {
445 ssize_t ret = 0;
446 unsigned long flags;
447 int i;
448 int max_lock_status[NUMBER_LOCK];
449 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
450
451 if (!platform)
452 return -ENODEV;
453
454 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
455 for (i = 0; i < NUMBER_LOCK; i++)
456 max_lock_status[i] = platform->user_max_lock[i];
457 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
458
459 for (i = 0; i < NUMBER_LOCK; i++)
460 ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%d:%d]", i, max_lock_status[i]);
461
462 if (ret < PAGE_SIZE - 1) {
463 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
464 } else {
465 buf[PAGE_SIZE-2] = '\n';
466 buf[PAGE_SIZE-1] = '\0';
467 ret = PAGE_SIZE-1;
468 }
469
470 return ret;
471 }
472
473 static ssize_t show_min_lock_status(struct device *dev, struct device_attribute *attr, char *buf)
474 {
475 ssize_t ret = 0;
476 unsigned long flags;
477 int i;
478 int min_lock_status[NUMBER_LOCK];
479 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
480
481 if (!platform)
482 return -ENODEV;
483
484 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
485 for (i = 0; i < NUMBER_LOCK; i++)
486 min_lock_status[i] = platform->user_min_lock[i];
487 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
488
489 for (i = 0; i < NUMBER_LOCK; i++)
490 ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%d:%d]", i, min_lock_status[i]);
491
492 if (ret < PAGE_SIZE - 1) {
493 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
494 } else {
495 buf[PAGE_SIZE-2] = '\n';
496 buf[PAGE_SIZE-1] = '\0';
497 ret = PAGE_SIZE-1;
498 }
499
500 return ret;
501 }
502
503 static ssize_t show_max_lock_dvfs(struct device *dev, struct device_attribute *attr, char *buf)
504 {
505 ssize_t ret = 0;
506 unsigned long flags;
507 int locked_clock = -1;
508 int user_locked_clock = -1;
509 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
510
511 if (!platform)
512 return -ENODEV;
513
514 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
515 locked_clock = platform->max_lock;
516 user_locked_clock = platform->user_max_lock_input;
517 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
518
519 if (locked_clock > 0)
520 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d / %d", locked_clock, user_locked_clock);
521 else
522 ret += snprintf(buf+ret, PAGE_SIZE-ret, "-1");
523
524 if (ret < PAGE_SIZE - 1) {
525 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
526 } else {
527 buf[PAGE_SIZE-2] = '\n';
528 buf[PAGE_SIZE-1] = '\0';
529 ret = PAGE_SIZE-1;
530 }
531
532 return ret;
533 }
534
535 static ssize_t set_max_lock_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
536 {
537 int ret, clock = 0;
538 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
539
540 if (!platform)
541 return -ENODEV;
542
543 if (sysfs_streq("0", buf)) {
544 platform->user_max_lock_input = 0;
545 gpu_dvfs_clock_lock(GPU_DVFS_MAX_UNLOCK, SYSFS_LOCK, 0);
546 } else {
547 ret = kstrtoint(buf, 0, &clock);
548 if (ret) {
549 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
550 return -ENOENT;
551 }
552
553 platform->user_max_lock_input = clock;
554
555 clock = gpu_dvfs_get_level_clock(clock);
556
557 ret = gpu_dvfs_get_level(clock);
558 if ((ret < gpu_dvfs_get_level(platform->gpu_max_clock)) || (ret > gpu_dvfs_get_level(platform->gpu_min_clock))) {
559 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, clock);
560 return -ENOENT;
561 }
562
563 if (clock == platform->gpu_max_clock)
564 gpu_dvfs_clock_lock(GPU_DVFS_MAX_UNLOCK, SYSFS_LOCK, 0);
565 else
566 gpu_dvfs_clock_lock(GPU_DVFS_MAX_LOCK, SYSFS_LOCK, clock);
567 }
568
569 return count;
570 }
571
572 static ssize_t show_min_lock_dvfs(struct device *dev, struct device_attribute *attr, char *buf)
573 {
574 ssize_t ret = 0;
575 unsigned long flags;
576 int locked_clock = -1;
577 int user_locked_clock = -1;
578 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
579
580 if (!platform)
581 return -ENODEV;
582
583 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
584 locked_clock = platform->min_lock;
585 user_locked_clock = platform->user_min_lock_input;
586 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
587
588 if (locked_clock > 0)
589 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d / %d", locked_clock, user_locked_clock);
590 else
591 ret += snprintf(buf+ret, PAGE_SIZE-ret, "-1");
592
593 if (ret < PAGE_SIZE - 1) {
594 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
595 } else {
596 buf[PAGE_SIZE-2] = '\n';
597 buf[PAGE_SIZE-1] = '\0';
598 ret = PAGE_SIZE-1;
599 }
600
601 return ret;
602 }
603
604 static ssize_t set_min_lock_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
605 {
606 int ret, clock = 0;
607 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
608
609 if (!platform)
610 return -ENODEV;
611
612 if (sysfs_streq("0", buf)) {
613 platform->user_min_lock_input = 0;
614 gpu_dvfs_clock_lock(GPU_DVFS_MIN_UNLOCK, SYSFS_LOCK, 0);
615 } else {
616 ret = kstrtoint(buf, 0, &clock);
617 if (ret) {
618 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
619 return -ENOENT;
620 }
621
622 platform->user_min_lock_input = clock;
623
624 clock = gpu_dvfs_get_level_clock(clock);
625
626 ret = gpu_dvfs_get_level(clock);
627 if ((ret < gpu_dvfs_get_level(platform->gpu_max_clock)) || (ret > gpu_dvfs_get_level(platform->gpu_min_clock))) {
628 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, clock);
629 return -ENOENT;
630 }
631
632 if (clock > platform->gpu_max_clock_limit)
633 clock = platform->gpu_max_clock_limit;
634
635 if (clock == platform->gpu_min_clock)
636 gpu_dvfs_clock_lock(GPU_DVFS_MIN_UNLOCK, SYSFS_LOCK, 0);
637 else
638 gpu_dvfs_clock_lock(GPU_DVFS_MIN_LOCK, SYSFS_LOCK, clock);
639 }
640
641 return count;
642 }
643
644 static ssize_t show_down_staycount(struct device *dev, struct device_attribute *attr, char *buf)
645 {
646 ssize_t ret = 0;
647 unsigned long flags;
648 int i = -1;
649 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
650
651 if (!platform)
652 return -ENODEV;
653
654 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
655 for (i = gpu_dvfs_get_level(platform->gpu_max_clock); i <= gpu_dvfs_get_level(platform->gpu_min_clock); i++)
656 ret += snprintf(buf+ret, PAGE_SIZE-ret, "Clock %d - %d\n",
657 platform->table[i].clock, platform->table[i].down_staycount);
658 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
659
660 if (ret < PAGE_SIZE - 1) {
661 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
662 } else {
663 buf[PAGE_SIZE-2] = '\n';
664 buf[PAGE_SIZE-1] = '\0';
665 ret = PAGE_SIZE-1;
666 }
667
668 return ret;
669 }
670
671 #define MIN_DOWN_STAYCOUNT 1
672 #define MAX_DOWN_STAYCOUNT 10
673 static ssize_t set_down_staycount(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
674 {
675 unsigned long flags;
676 char tmpbuf[32];
677 char *sptr, *tok;
678 int ret = -1;
679 int clock = -1, level = -1, down_staycount = 0;
680 unsigned int len = 0;
681 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
682
683 if (!platform)
684 return -ENODEV;
685
686 len = (unsigned int)min(count, sizeof(tmpbuf) - 1);
687 memcpy(tmpbuf, buf, len);
688 tmpbuf[len] = '\0';
689 sptr = tmpbuf;
690
691 tok = strsep(&sptr, " ,");
692 if (tok == NULL) {
693 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid input\n", __func__);
694 return -ENOENT;
695 }
696
697 ret = kstrtoint(tok, 0, &clock);
698 if (ret) {
699 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid input %d\n", __func__, clock);
700 return -ENOENT;
701 }
702
703 tok = strsep(&sptr, " ,");
704 if (tok == NULL) {
705 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid input\n", __func__);
706 return -ENOENT;
707 }
708
709 ret = kstrtoint(tok, 0, &down_staycount);
710 if (ret) {
711 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid input %d\n", __func__, down_staycount);
712 return -ENOENT;
713 }
714
715 level = gpu_dvfs_get_level(clock);
716 if (level < 0) {
717 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, clock);
718 return -ENOENT;
719 }
720
721 if ((down_staycount < MIN_DOWN_STAYCOUNT) || (down_staycount > MAX_DOWN_STAYCOUNT)) {
722 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: down_staycount is out of range (%d, %d ~ %d)\n",
723 __func__, down_staycount, MIN_DOWN_STAYCOUNT, MAX_DOWN_STAYCOUNT);
724 return -ENOENT;
725 }
726
727 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
728 platform->table[level].down_staycount = down_staycount;
729 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
730
731 return count;
732 }
733
734 static ssize_t show_highspeed_clock(struct device *dev, struct device_attribute *attr, char *buf)
735 {
736 ssize_t ret = 0;
737 unsigned long flags;
738 int highspeed_clock = -1;
739 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
740
741 if (!platform)
742 return -ENODEV;
743
744 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
745 highspeed_clock = platform->interactive.highspeed_clock;
746 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
747
748 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", highspeed_clock);
749
750 if (ret < PAGE_SIZE - 1) {
751 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
752 } else {
753 buf[PAGE_SIZE-2] = '\n';
754 buf[PAGE_SIZE-1] = '\0';
755 ret = PAGE_SIZE-1;
756 }
757
758 return ret;
759 }
760
761 static ssize_t set_highspeed_clock(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
762 {
763 ssize_t ret = 0;
764 unsigned long flags;
765 int highspeed_clock = -1;
766 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
767
768 if (!platform)
769 return -ENODEV;
770
771 ret = kstrtoint(buf, 0, &highspeed_clock);
772 if (ret) {
773 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
774 return -ENOENT;
775 }
776
777 ret = gpu_dvfs_get_level(highspeed_clock);
778 if ((ret < gpu_dvfs_get_level(platform->gpu_max_clock)) || (ret > gpu_dvfs_get_level(platform->gpu_min_clock))) {
779 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, highspeed_clock);
780 return -ENOENT;
781 }
782
783 if (highspeed_clock > platform->gpu_max_clock_limit)
784 highspeed_clock = platform->gpu_max_clock_limit;
785
786 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
787 platform->interactive.highspeed_clock = highspeed_clock;
788 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
789
790 return count;
791 }
792
793 static ssize_t show_highspeed_load(struct device *dev, struct device_attribute *attr, char *buf)
794 {
795 ssize_t ret = 0;
796 unsigned long flags;
797 int highspeed_load = -1;
798 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
799
800 if (!platform)
801 return -ENODEV;
802
803 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
804 highspeed_load = platform->interactive.highspeed_load;
805 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
806
807 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", highspeed_load);
808
809 if (ret < PAGE_SIZE - 1) {
810 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
811 } else {
812 buf[PAGE_SIZE-2] = '\n';
813 buf[PAGE_SIZE-1] = '\0';
814 ret = PAGE_SIZE-1;
815 }
816
817 return ret;
818 }
819
820 static ssize_t set_highspeed_load(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
821 {
822 ssize_t ret = 0;
823 unsigned long flags;
824 int highspeed_load = -1;
825 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
826
827 if (!platform)
828 return -ENODEV;
829
830 ret = kstrtoint(buf, 0, &highspeed_load);
831 if (ret) {
832 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
833 return -ENOENT;
834 }
835
836 if ((highspeed_load < 0) || (highspeed_load > 100)) {
837 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid load value (%d)\n", __func__, highspeed_load);
838 return -ENOENT;
839 }
840
841 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
842 platform->interactive.highspeed_load = highspeed_load;
843 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
844
845 return count;
846 }
847
848 static ssize_t show_highspeed_delay(struct device *dev, struct device_attribute *attr, char *buf)
849 {
850 ssize_t ret = 0;
851 unsigned long flags;
852 int highspeed_delay = -1;
853 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
854
855 if (!platform)
856 return -ENODEV;
857
858 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
859 highspeed_delay = platform->interactive.highspeed_delay;
860 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
861
862 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", highspeed_delay);
863
864 if (ret < PAGE_SIZE - 1) {
865 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
866 } else {
867 buf[PAGE_SIZE-2] = '\n';
868 buf[PAGE_SIZE-1] = '\0';
869 ret = PAGE_SIZE-1;
870 }
871
872 return ret;
873 }
874
875 static ssize_t set_highspeed_delay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
876 {
877 ssize_t ret = 0;
878 unsigned long flags;
879 int highspeed_delay = -1;
880 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
881
882 if (!platform)
883 return -ENODEV;
884
885 ret = kstrtoint(buf, 0, &highspeed_delay);
886 if (ret) {
887 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
888 return -ENOENT;
889 }
890
891 if ((highspeed_delay < 0) || (highspeed_delay > 5)) {
892 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid load value (%d)\n", __func__, highspeed_delay);
893 return -ENOENT;
894 }
895
896 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
897 platform->interactive.highspeed_delay = highspeed_delay;
898 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
899
900 return count;
901 }
902
903 static ssize_t show_wakeup_lock(struct device *dev, struct device_attribute *attr, char *buf)
904 {
905 ssize_t ret = 0;
906 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
907
908 if (!platform)
909 return -ENODEV;
910
911 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->wakeup_lock);
912
913 if (ret < PAGE_SIZE - 1) {
914 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
915 } else {
916 buf[PAGE_SIZE-2] = '\n';
917 buf[PAGE_SIZE-1] = '\0';
918 ret = PAGE_SIZE-1;
919 }
920
921 return ret;
922 }
923
924 static ssize_t set_wakeup_lock(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
925 {
926 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
927
928 if (!platform)
929 return -ENODEV;
930
931 if (sysfs_streq("0", buf))
932 platform->wakeup_lock = false;
933 else if (sysfs_streq("1", buf))
934 platform->wakeup_lock = true;
935 else
936 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid val - only [0 or 1] is available\n", __func__);
937
938 return count;
939 }
940
941 static ssize_t show_polling_speed(struct device *dev, struct device_attribute *attr, char *buf)
942 {
943 ssize_t ret = 0;
944 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
945
946 if (!platform)
947 return -ENODEV;
948
949 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->polling_speed);
950
951 if (ret < PAGE_SIZE - 1) {
952 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
953 } else {
954 buf[PAGE_SIZE-2] = '\n';
955 buf[PAGE_SIZE-1] = '\0';
956 ret = PAGE_SIZE-1;
957 }
958
959 return ret;
960 }
961
962 static ssize_t set_polling_speed(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
963 {
964 int ret, polling_speed;
965 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
966
967 if (!platform)
968 return -ENODEV;
969
970 ret = kstrtoint(buf, 0, &polling_speed);
971
972 if (ret) {
973 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
974 return -ENOENT;
975 }
976
977 if ((polling_speed < 100) || (polling_speed > 1000)) {
978 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: out of range [100~1000] (%d)\n", __func__, polling_speed);
979 return -ENOENT;
980 }
981
982 platform->polling_speed = polling_speed;
983
984 return count;
985 }
986
987 static ssize_t show_tmu(struct device *dev, struct device_attribute *attr, char *buf)
988 {
989 ssize_t ret = 0;
990 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
991
992 if (!platform)
993 return -ENODEV;
994
995 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->tmu_status);
996
997 if (ret < PAGE_SIZE - 1) {
998 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
999 } else {
1000 buf[PAGE_SIZE-2] = '\n';
1001 buf[PAGE_SIZE-1] = '\0';
1002 ret = PAGE_SIZE-1;
1003 }
1004
1005 return ret;
1006 }
1007
1008 static ssize_t set_tmu_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1009 {
1010 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1011
1012 if (!platform)
1013 return -ENODEV;
1014
1015 if (sysfs_streq("0", buf)) {
1016 if (platform->voltage_margin != 0) {
1017 platform->voltage_margin = 0;
1018 gpu_set_target_clk_vol(platform->cur_clock, false);
1019 }
1020 gpu_dvfs_clock_lock(GPU_DVFS_MAX_UNLOCK, TMU_LOCK, 0);
1021 platform->tmu_status = false;
1022 } else if (sysfs_streq("1", buf))
1023 platform->tmu_status = true;
1024 else
1025 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value - only [0 or 1] is available\n", __func__);
1026
1027 return count;
1028 }
1029
1030 #ifdef CONFIG_CPU_THERMAL_IPA
1031 static ssize_t show_norm_utilization(struct device *dev, struct device_attribute *attr, char *buf)
1032 {
1033 ssize_t ret = 0;
1034 #ifdef CONFIG_EXYNOS_THERMAL
1035
1036 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", gpu_ipa_dvfs_get_norm_utilisation(pkbdev));
1037
1038 if (ret < PAGE_SIZE - 1) {
1039 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1040 } else {
1041 buf[PAGE_SIZE-2] = '\n';
1042 buf[PAGE_SIZE-1] = '\0';
1043 ret = PAGE_SIZE-1;
1044 }
1045 #else
1046 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: EXYNOS THERMAL build config is disabled\n", __func__);
1047 #endif /* CONFIG_EXYNOS_THERMAL */
1048
1049 return ret;
1050 }
1051
1052 static ssize_t show_utilization_stats(struct device *dev, struct device_attribute *attr, char *buf)
1053 {
1054 ssize_t ret = 0;
1055 #ifdef CONFIG_EXYNOS_THERMAL
1056 struct mali_debug_utilisation_stats stats;
1057
1058 gpu_ipa_dvfs_get_utilisation_stats(&stats);
1059
1060 ret += snprintf(buf+ret, PAGE_SIZE-ret, "util=%d norm_util=%d norm_freq=%d time_busy=%u time_idle=%u time_tick=%d",
1061 stats.s.utilisation, stats.s.norm_utilisation,
1062 stats.s.freq_for_norm, stats.time_busy, stats.time_idle,
1063 stats.time_tick);
1064
1065 if (ret < PAGE_SIZE - 1) {
1066 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1067 } else {
1068 buf[PAGE_SIZE-2] = '\n';
1069 buf[PAGE_SIZE-1] = '\0';
1070 ret = PAGE_SIZE-1;
1071 }
1072 #else
1073 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: EXYNOS THERMAL build config is disabled\n", __func__);
1074 #endif /* CONFIG_EXYNOS_THERMAL */
1075
1076 return ret;
1077 }
1078 #endif /* CONFIG_CPU_THERMAL_IPA */
1079 #endif /* CONFIG_MALI_DVFS */
1080
1081 static ssize_t show_debug_level(struct device *dev, struct device_attribute *attr, char *buf)
1082 {
1083 ssize_t ret = 0;
1084
1085 ret += snprintf(buf+ret, PAGE_SIZE-ret, "[Current] %d (%d ~ %d)",
1086 gpu_get_debug_level(), DVFS_DEBUG_START+1, DVFS_DEBUG_END-1);
1087
1088 if (ret < PAGE_SIZE - 1) {
1089 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1090 } else {
1091 buf[PAGE_SIZE-2] = '\n';
1092 buf[PAGE_SIZE-1] = '\0';
1093 ret = PAGE_SIZE-1;
1094 }
1095
1096 return ret;
1097 }
1098
1099 static ssize_t set_debug_level(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1100 {
1101 int debug_level, ret;
1102
1103 ret = kstrtoint(buf, 0, &debug_level);
1104 if (ret) {
1105 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
1106 return -ENOENT;
1107 }
1108
1109 if ((debug_level <= DVFS_DEBUG_START) || (debug_level >= DVFS_DEBUG_END)) {
1110 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid debug level (%d)\n", __func__, debug_level);
1111 return -ENOENT;
1112 }
1113
1114 gpu_set_debug_level(debug_level);
1115
1116 return count;
1117 }
1118
1119 #ifdef CONFIG_MALI_EXYNOS_TRACE
1120 static ssize_t show_trace_level(struct device *dev, struct device_attribute *attr, char *buf)
1121 {
1122 ssize_t ret = 0;
1123 int level;
1124
1125 for (level = TRACE_NONE + 1; level < TRACE_END - 1; level++)
1126 if (gpu_check_trace_level(level))
1127 ret += snprintf(buf+ret, PAGE_SIZE-ret, "<%d> ", level);
1128 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\nList: %d ~ %d\n(None: %d, All: %d)",
1129 TRACE_NONE + 1, TRACE_ALL - 1, TRACE_NONE, TRACE_ALL);
1130
1131 if (ret < PAGE_SIZE - 1) {
1132 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1133 } else {
1134 buf[PAGE_SIZE-2] = '\n';
1135 buf[PAGE_SIZE-1] = '\0';
1136 ret = PAGE_SIZE-1;
1137 }
1138
1139 return ret;
1140 }
1141
1142 static ssize_t set_trace_level(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1143 {
1144 int trace_level, ret;
1145
1146 ret = kstrtoint(buf, 0, &trace_level);
1147 if (ret) {
1148 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
1149 return -ENOENT;
1150 }
1151
1152 if ((trace_level <= TRACE_START) || (trace_level >= TRACE_END)) {
1153 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid trace level (%d)\n", __func__, trace_level);
1154 return -ENOENT;
1155 }
1156
1157 gpu_set_trace_level(trace_level);
1158
1159 return count;
1160 }
1161
/* Trace ring-buffer message formatter implemented in the mali_kbase core. */
extern void kbasep_trace_format_msg(struct kbase_trace *trace_msg, char *buffer, int len);
/*
 * sysfs show for "trace_dump": drains the kbase trace ring buffer into buf,
 * one formatted entry per line, then clears the buffer.
 */
static ssize_t show_trace_dump(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	unsigned long flags;
	u32 start, end;

	/* Hold the trace lock while walking from first_out to next_in so the
	 * producer cannot move the indices underneath us. */
	spin_lock_irqsave(&pkbdev->trace_lock, flags);
	start = pkbdev->trace_first_out;
	end = pkbdev->trace_next_in;

	while (start != end) {
		char buffer[KBASE_TRACE_SIZE];
		struct kbase_trace *trace_msg = &pkbdev->trace_rbuf[start];

		kbasep_trace_format_msg(trace_msg, buffer, KBASE_TRACE_SIZE);
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "%s\n", buffer);

		/* Stop once the sysfs page is full; the remaining entries are
		 * discarded by the clear below. */
		if (ret >= PAGE_SIZE - 1)
			break;

		start = (start + 1) & KBASE_TRACE_MASK;
	}

	spin_unlock_irqrestore(&pkbdev->trace_lock, flags);
	/* Reading the dump is destructive: the ring buffer is reset. */
	KBASE_TRACE_CLEAR(pkbdev);

	/* Guarantee newline termination even when the output was truncated. */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
1199
/* sysfs store for "trace_dump": any write discards all buffered trace entries. */
static ssize_t init_trace_dump(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	KBASE_TRACE_CLEAR(pkbdev);

	return count;
}
1206 #endif /* CONFIG_MALI_EXYNOS_TRACE */
1207
1208 #ifdef DEBUG_FBDEV
1209 static ssize_t show_fbdev(struct device *dev, struct device_attribute *attr, char *buf)
1210 {
1211 ssize_t ret = 0;
1212 int i;
1213
1214 for (i = 0; i < num_registered_fb; i++)
1215 ret += snprintf(buf+ret, PAGE_SIZE-ret, "fb[%d] xres=%d, yres=%d, addr=0x%lx\n", i, registered_fb[i]->var.xres, registered_fb[i]->var.yres, registered_fb[i]->fix.smem_start);
1216
1217 if (ret < PAGE_SIZE - 1) {
1218 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1219 } else {
1220 buf[PAGE_SIZE-2] = '\n';
1221 buf[PAGE_SIZE-1] = '\0';
1222 ret = PAGE_SIZE-1;
1223 }
1224
1225 return ret;
1226 }
1227 #endif
1228
1229 static int gpu_get_status(struct exynos_context *platform, char *buf, size_t buf_size)
1230 {
1231 int cnt = 0;
1232 int i;
1233 int mmu_fault_cnt = 0;
1234
1235 if (!platform)
1236 return -ENODEV;
1237
1238 if (buf == NULL)
1239 return 0;
1240
1241 for (i = GPU_MMU_TRANSLATION_FAULT; i <= GPU_MMU_MEMORY_ATTRIBUTES_FAULT; i++)
1242 mmu_fault_cnt += platform->gpu_exception_count[i];
1243
1244 cnt += snprintf(buf+cnt, buf_size-cnt, "reset count : %d\n", platform->gpu_exception_count[GPU_RESET]);
1245 cnt += snprintf(buf+cnt, buf_size-cnt, "data invalid count : %d\n", platform->gpu_exception_count[GPU_DATA_INVALIDATE_FAULT]);
1246 cnt += snprintf(buf+cnt, buf_size-cnt, "mmu fault count : %d\n", mmu_fault_cnt);
1247
1248 for (i = 0; i < BMAX_RETRY_CNT; i++)
1249 cnt += snprintf(buf+cnt, buf_size-cnt, "warmup retry count %d : %d\n", i+1, platform->balance_retry_count[i]);
1250
1251 return cnt;
1252 }
1253
1254 static ssize_t show_gpu_status(struct device *dev, struct device_attribute *attr, char *buf)
1255 {
1256 ssize_t ret = 0;
1257 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1258
1259 if (!platform)
1260 return -ENODEV;
1261
1262 ret += gpu_get_status(platform, buf+ret, (size_t)PAGE_SIZE-ret);
1263
1264 if (ret < PAGE_SIZE - 1) {
1265 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1266 } else {
1267 buf[PAGE_SIZE-2] = '\n';
1268 buf[PAGE_SIZE-1] = '\0';
1269 ret = PAGE_SIZE-1;
1270 }
1271
1272 return ret;
1273 }
1274
1275 #ifdef CONFIG_MALI_VK_BOOST
1276 static ssize_t show_vk_boost_status(struct device *dev, struct device_attribute *attr, char *buf)
1277 {
1278 ssize_t ret = 0;
1279 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1280
1281 if (!platform)
1282 return -ENODEV;
1283
1284 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->ctx_vk_need_qos);
1285
1286 if (ret < PAGE_SIZE - 1) {
1287 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1288 } else {
1289 buf[PAGE_SIZE-2] = '\n';
1290 buf[PAGE_SIZE-1] = '\0';
1291 ret = PAGE_SIZE-1;
1292 }
1293
1294 return ret;
1295 }
1296 #endif
1297
1298 #ifdef CONFIG_MALI_SUSTAINABLE_OPT
1299 static ssize_t show_sustainable_status(struct device *dev, struct device_attribute *attr, char *buf)
1300 {
1301 ssize_t ret = 0;
1302 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1303
1304 if (!platform)
1305 return -ENODEV;
1306
1307 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->sustainable.status);
1308
1309 if (ret < PAGE_SIZE - 1) {
1310 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1311 } else {
1312 buf[PAGE_SIZE-2] = '\n';
1313 buf[PAGE_SIZE-1] = '\0';
1314 ret = PAGE_SIZE-1;
1315 }
1316
1317 return ret;
1318 }
1319 #endif
1320
1321 #ifdef CONFIG_MALI_SEC_CL_BOOST
1322 static ssize_t set_cl_boost_disable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1323 {
1324 unsigned int cl_boost_disable = 0;
1325 int ret;
1326
1327 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1328
1329 if (!platform)
1330 return -ENODEV;
1331
1332 ret = kstrtoint(buf, 0, &cl_boost_disable);
1333 if (ret) {
1334 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
1335 return -ENOENT;
1336 }
1337
1338 if (cl_boost_disable == 0)
1339 platform->cl_boost_disable = false;
1340 else
1341 platform->cl_boost_disable = true;
1342
1343 return count;
1344 }
1345
1346 static ssize_t show_cl_boost_disable(struct device *dev, struct device_attribute *attr, char *buf)
1347 {
1348 ssize_t ret = 0;
1349 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1350
1351 if (!platform)
1352 return -ENODEV;
1353
1354 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->cl_boost_disable);
1355
1356 if (ret < PAGE_SIZE - 1) {
1357 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1358 } else {
1359 buf[PAGE_SIZE-2] = '\n';
1360 buf[PAGE_SIZE-1] = '\0';
1361 ret = PAGE_SIZE-1;
1362 }
1363
1364 return ret;
1365 }
1366 #endif
/*
 * Device-attribute declarations for the Mali sysfs interface.
 *
 * Each DEVICE_ATTR(name, mode, show, store) expands to a dev_attr_<name>
 * object that is registered in gpu_create_sysfs_file() below. They expose
 * the GPU operating clock/voltage, DVFS tuning knobs, thermal control and
 * debug facilities (trace, framebuffer info, status counters).
 */

DEVICE_ATTR(clock, S_IRUGO|S_IWUSR, show_clock, set_clock);
DEVICE_ATTR(vol, S_IRUGO, show_vol, NULL);
DEVICE_ATTR(power_state, S_IRUGO, show_power_state, NULL);
DEVICE_ATTR(asv_table, S_IRUGO, show_asv_table, NULL);
DEVICE_ATTR(dvfs_table, S_IRUGO, show_dvfs_table, NULL);
DEVICE_ATTR(time_in_state, S_IRUGO|S_IWUSR, show_time_in_state, set_time_in_state);
DEVICE_ATTR(utilization, S_IRUGO, show_utilization, NULL);
DEVICE_ATTR(perf, S_IRUGO, show_perf, NULL);
#ifdef CONFIG_MALI_DVFS
DEVICE_ATTR(dvfs, S_IRUGO|S_IWUSR, show_dvfs, set_dvfs);
DEVICE_ATTR(dvfs_governor, S_IRUGO|S_IWUSR, show_governor, set_governor);
DEVICE_ATTR(dvfs_max_lock_status, S_IRUGO, show_max_lock_status, NULL);
DEVICE_ATTR(dvfs_min_lock_status, S_IRUGO, show_min_lock_status, NULL);
DEVICE_ATTR(dvfs_max_lock, S_IRUGO|S_IWUSR, show_max_lock_dvfs, set_max_lock_dvfs);
DEVICE_ATTR(dvfs_min_lock, S_IRUGO|S_IWUSR, show_min_lock_dvfs, set_min_lock_dvfs);
DEVICE_ATTR(down_staycount, S_IRUGO|S_IWUSR, show_down_staycount, set_down_staycount);
DEVICE_ATTR(highspeed_clock, S_IRUGO|S_IWUSR, show_highspeed_clock, set_highspeed_clock);
DEVICE_ATTR(highspeed_load, S_IRUGO|S_IWUSR, show_highspeed_load, set_highspeed_load);
DEVICE_ATTR(highspeed_delay, S_IRUGO|S_IWUSR, show_highspeed_delay, set_highspeed_delay);
DEVICE_ATTR(wakeup_lock, S_IRUGO|S_IWUSR, show_wakeup_lock, set_wakeup_lock);
DEVICE_ATTR(polling_speed, S_IRUGO|S_IWUSR, show_polling_speed, set_polling_speed);
DEVICE_ATTR(tmu, S_IRUGO|S_IWUSR, show_tmu, set_tmu_control);
#ifdef CONFIG_CPU_THERMAL_IPA
DEVICE_ATTR(norm_utilization, S_IRUGO, show_norm_utilization, NULL);
DEVICE_ATTR(utilization_stats, S_IRUGO, show_utilization_stats, NULL);
#endif /* CONFIG_CPU_THERMAL_IPA */
#endif /* CONFIG_MALI_DVFS */
DEVICE_ATTR(debug_level, S_IRUGO|S_IWUSR, show_debug_level, set_debug_level);
#ifdef CONFIG_MALI_EXYNOS_TRACE
DEVICE_ATTR(trace_level, S_IRUGO|S_IWUSR, show_trace_level, set_trace_level);
DEVICE_ATTR(trace_dump, S_IRUGO|S_IWUSR, show_trace_dump, init_trace_dump);
#endif /* CONFIG_MALI_EXYNOS_TRACE */
#ifdef DEBUG_FBDEV
DEVICE_ATTR(fbdev, S_IRUGO, show_fbdev, NULL);
#endif
DEVICE_ATTR(gpu_status, S_IRUGO, show_gpu_status, NULL);
#ifdef CONFIG_MALI_VK_BOOST
DEVICE_ATTR(vk_boost_status, S_IRUGO, show_vk_boost_status, NULL);
#endif
#ifdef CONFIG_MALI_SUSTAINABLE_OPT
DEVICE_ATTR(sustainable_status, S_IRUGO, show_sustainable_status, NULL);
#endif
#ifdef CONFIG_MALI_SEC_CL_BOOST
DEVICE_ATTR(cl_boost_disable, S_IRUGO|S_IWUSR, show_cl_boost_disable, set_cl_boost_disable);
#endif
1417
1418 #ifdef CONFIG_MALI_DEBUG_KERNEL_SYSFS
1419 #ifdef CONFIG_MALI_DVFS
1420 #define BUF_SIZE 1000
1421 static ssize_t show_kernel_sysfs_gpu_info(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1422 {
1423 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1424 ssize_t ret = 0;
1425
1426 if (!platform)
1427 return -ENODEV;
1428
1429 if (buf == NULL)
1430 return 0;
1431
1432 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"SSTOP\":\"%d\",", platform->gpu_exception_count[GPU_SOFT_STOP]);
1433 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"HSTOP\":\"%d\",", platform->gpu_exception_count[GPU_HARD_STOP]);
1434 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"RESET\":\"%d\",", platform->gpu_exception_count[GPU_RESET]);
1435 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"DIFLT\":\"%d\",", platform->gpu_exception_count[GPU_DATA_INVALIDATE_FAULT]);
1436 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"TRFLT\":\"%d\",", platform->gpu_exception_count[GPU_MMU_TRANSLATION_FAULT]);
1437 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"PMFLT\":\"%d\",", platform->gpu_exception_count[GPU_MMU_PERMISSION_FAULT]);
1438 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"BFLT\":\"%d\",", platform->gpu_exception_count[GPU_MMU_TRANSTAB_BUS_FAULT]);
1439 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"ACCFG\":\"%d\",", platform->gpu_exception_count[GPU_MMU_ACCESS_FLAG_FAULT]);
1440 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"ASFLT\":\"%d\",", platform->gpu_exception_count[GPU_MMU_ADDRESS_SIZE_FAULT]);
1441 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"ATFLT\":\"%d\",", platform->gpu_exception_count[GPU_MMU_MEMORY_ATTRIBUTES_FAULT]);
1442 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"UNKN\":\"%d\"", platform->gpu_exception_count[GPU_UNKNOWN]);
1443
1444 if (ret < PAGE_SIZE - 1) {
1445 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1446 } else {
1447 buf[PAGE_SIZE-2] = '\n';
1448 buf[PAGE_SIZE-1] = '\0';
1449 ret = PAGE_SIZE-1;
1450 }
1451
1452 return ret;
1453 }
1454
1455 static ssize_t show_kernel_sysfs_max_lock_dvfs(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1456 {
1457 ssize_t ret = 0;
1458 unsigned long flags;
1459 int locked_clock = -1;
1460 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1461
1462 if (!platform)
1463 return -ENODEV;
1464
1465 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
1466 locked_clock = platform->max_lock;
1467 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
1468
1469 if (locked_clock > 0)
1470 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", locked_clock);
1471 else
1472 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->gpu_max_clock);
1473
1474 if (ret < PAGE_SIZE - 1) {
1475 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1476 } else {
1477 buf[PAGE_SIZE-2] = '\n';
1478 buf[PAGE_SIZE-1] = '\0';
1479 ret = PAGE_SIZE-1;
1480 }
1481
1482 return ret;
1483 }
1484
1485 static ssize_t set_kernel_sysfs_max_lock_dvfs(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
1486 {
1487 int ret, clock = 0;
1488 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1489
1490 if (!platform)
1491 return -ENODEV;
1492
1493 if (sysfs_streq("0", buf)) {
1494 platform->user_max_lock_input = 0;
1495 gpu_dvfs_clock_lock(GPU_DVFS_MAX_UNLOCK, SYSFS_LOCK, 0);
1496 } else {
1497 ret = kstrtoint(buf, 0, &clock);
1498 if (ret) {
1499 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
1500 return -ENOENT;
1501 }
1502
1503 platform->user_max_lock_input = clock;
1504
1505 clock = gpu_dvfs_get_level_clock(clock);
1506
1507 ret = gpu_dvfs_get_level(clock);
1508 if ((ret < gpu_dvfs_get_level(platform->gpu_max_clock)) || (ret > gpu_dvfs_get_level(platform->gpu_min_clock))) {
1509 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, clock);
1510 return -ENOENT;
1511 }
1512
1513 if (clock == platform->gpu_max_clock)
1514 gpu_dvfs_clock_lock(GPU_DVFS_MAX_UNLOCK, SYSFS_LOCK, 0);
1515 else
1516 gpu_dvfs_clock_lock(GPU_DVFS_MAX_LOCK, SYSFS_LOCK, clock);
1517 }
1518
1519 return count;
1520 }
1521
1522 static ssize_t show_kernel_sysfs_available_governor(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1523 {
1524 ssize_t ret = 0;
1525 gpu_dvfs_governor_info *governor_info;
1526 int i;
1527 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1528
1529 if (!platform)
1530 return -ENODEV;
1531
1532 governor_info = (gpu_dvfs_governor_info *)gpu_dvfs_get_governor_info();
1533
1534 for (i = 0; i < G3D_MAX_GOVERNOR_NUM; i++)
1535 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%s ", governor_info[i].name);
1536
1537 if (ret < PAGE_SIZE - 1) {
1538 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1539 } else {
1540 buf[PAGE_SIZE-2] = '\n';
1541 buf[PAGE_SIZE-1] = '\0';
1542 ret = PAGE_SIZE-1;
1543 }
1544
1545 return ret;
1546 }
1547
1548 static ssize_t show_kernel_sysfs_min_lock_dvfs(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1549 {
1550 ssize_t ret = 0;
1551 unsigned long flags;
1552 int locked_clock = -1;
1553 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1554
1555 if (!platform)
1556 return -ENODEV;
1557
1558 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
1559 locked_clock = platform->min_lock;
1560 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
1561
1562 if (locked_clock > 0)
1563 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", locked_clock);
1564 else
1565 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->gpu_min_clock);
1566
1567 if (ret < PAGE_SIZE - 1) {
1568 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1569 } else {
1570 buf[PAGE_SIZE-2] = '\n';
1571 buf[PAGE_SIZE-1] = '\0';
1572 ret = PAGE_SIZE-1;
1573 }
1574
1575 return ret;
1576 }
1577
1578 static ssize_t set_kernel_sysfs_min_lock_dvfs(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
1579 {
1580 int ret, clock = 0;
1581 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1582
1583 if (!platform)
1584 return -ENODEV;
1585
1586 if (sysfs_streq("0", buf)) {
1587 platform->user_min_lock_input = 0;
1588 gpu_dvfs_clock_lock(GPU_DVFS_MIN_UNLOCK, SYSFS_LOCK, 0);
1589 } else {
1590 ret = kstrtoint(buf, 0, &clock);
1591 if (ret) {
1592 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
1593 return -ENOENT;
1594 }
1595
1596 platform->user_min_lock_input = clock;
1597
1598 clock = gpu_dvfs_get_level_clock(clock);
1599
1600 ret = gpu_dvfs_get_level(clock);
1601 if ((ret < gpu_dvfs_get_level(platform->gpu_max_clock)) || (ret > gpu_dvfs_get_level(platform->gpu_min_clock))) {
1602 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, clock);
1603 return -ENOENT;
1604 }
1605
1606 if (clock > platform->gpu_max_clock_limit)
1607 clock = platform->gpu_max_clock_limit;
1608
1609 if (clock == platform->gpu_min_clock)
1610 gpu_dvfs_clock_lock(GPU_DVFS_MIN_UNLOCK, SYSFS_LOCK, 0);
1611 else
1612 gpu_dvfs_clock_lock(GPU_DVFS_MIN_LOCK, SYSFS_LOCK, clock);
1613 }
1614
1615 return count;
1616 }
1617 #endif /* #ifdef CONFIG_MALI_DVFS */
1618
1619 static ssize_t show_kernel_sysfs_utilization(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1620 {
1621 ssize_t ret = 0;
1622 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1623
1624 if (!platform)
1625 return -ENODEV;
1626
1627 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%3d%%", platform->env_data.utilization);
1628
1629 if (ret < PAGE_SIZE - 1) {
1630 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1631 } else {
1632 buf[PAGE_SIZE-2] = '\n';
1633 buf[PAGE_SIZE-1] = '\0';
1634 ret = PAGE_SIZE-1;
1635 }
1636
1637 return ret;
1638 }
1639
/*
 * Kernel-group sysfs show for "gpu_clock": the current GPU clock, or 0 when
 * the GPU is powered down or DVS is enabled (the clock cannot be read then).
 */
static ssize_t show_kernel_sysfs_clock(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	int clock = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

#ifdef CONFIG_MALI_RT_PM
	/* With runtime PM, serialize against the power-domain state via the
	 * PD access_lock so the domain cannot power off mid-read. */
	if (platform->exynos_pm_domain) {
		mutex_lock(&platform->exynos_pm_domain->access_lock);
		if (!platform->dvs_is_enabled && gpu_is_power_on())
			clock = gpu_get_cur_clock(platform);
		mutex_unlock(&platform->exynos_pm_domain->access_lock);
	}
#else
	/* Without runtime PM, guard the read with the driver's clock mutex. */
	if (gpu_control_is_power_on(pkbdev) == 1) {
		mutex_lock(&platform->gpu_clock_lock);
		if (!platform->dvs_is_enabled)
			clock = gpu_get_cur_clock(platform);
		mutex_unlock(&platform->gpu_clock_lock);
	}
#endif

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", clock);

	/* Guarantee newline termination even when the output was truncated. */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
1677
1678 static ssize_t show_kernel_sysfs_freq_table(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1679 {
1680 ssize_t ret = 0;
1681 int i = 0;
1682 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1683
1684 if (!platform)
1685 return -ENODEV;
1686
1687 for (i = gpu_dvfs_get_level(platform->gpu_min_clock); i >= gpu_dvfs_get_level(platform->gpu_max_clock); i--) {
1688 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d ", platform->table[i].clock);
1689 }
1690
1691 if (ret < PAGE_SIZE - 1) {
1692 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1693 } else {
1694 buf[PAGE_SIZE-2] = '\n';
1695 buf[PAGE_SIZE-1] = '\0';
1696 ret = PAGE_SIZE-1;
1697 }
1698
1699 return ret;
1700 }
1701
1702 #ifdef CONFIG_MALI_DVFS
1703 static ssize_t show_kernel_sysfs_governor(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1704 {
1705 ssize_t ret = 0;
1706 gpu_dvfs_governor_info *governor_info = NULL;
1707 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1708
1709 if (!platform)
1710 return -ENODEV;
1711
1712 governor_info = (gpu_dvfs_governor_info *)gpu_dvfs_get_governor_info();
1713
1714 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%s", governor_info[platform->governor_type].name);
1715
1716 if (ret < PAGE_SIZE - 1) {
1717 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1718 } else {
1719 buf[PAGE_SIZE-2] = '\n';
1720 buf[PAGE_SIZE-1] = '\0';
1721 ret = PAGE_SIZE-1;
1722 }
1723
1724 return ret;
1725 }
1726
1727 static ssize_t set_kernel_sysfs_governor(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
1728 {
1729 int ret;
1730 int i = 0;
1731 int next_governor_type = -1;
1732 size_t governor_name_size = 0;
1733 gpu_dvfs_governor_info *governor_info = NULL;
1734 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1735
1736 if (!platform)
1737 return -ENODEV;
1738
1739 governor_info = (gpu_dvfs_governor_info *)gpu_dvfs_get_governor_info();
1740
1741 for (i = 0; i < G3D_MAX_GOVERNOR_NUM; i++) {
1742 governor_name_size = strlen(governor_info[i].name);
1743 if (!strncmp(buf, governor_info[i].name, governor_name_size)) {
1744 next_governor_type = i;
1745 break;
1746 }
1747 }
1748
1749 if ((next_governor_type < 0) || (next_governor_type >= G3D_MAX_GOVERNOR_NUM)) {
1750 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
1751 return -ENOENT;
1752 }
1753
1754 ret = gpu_dvfs_governor_change(next_governor_type);
1755
1756 if (ret < 0) {
1757 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u,
1758 "%s: fail to set the new governor (%d)\n", __func__, next_governor_type);
1759 return -ENOENT;
1760 }
1761
1762 return count;
1763 }
1764 #endif /* #ifdef CONFIG_MALI_DVFS */
1765
/*
 * Kernel-group sysfs show for "gpu_model": maps the hardware product ID to
 * a human-readable Mali name.
 */
static ssize_t show_kernel_sysfs_gpu_model(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	/* COPY from mali_kbase_core_linux.c : 2594 line, last updated: 20161017, r2p0-03rel0 */
	static const struct gpu_product_id_name {
		unsigned id;
		char *name;
	} gpu_product_id_names[] = {
		{ .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
		{ .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
		{ .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
		{ .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
		{ .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
		{ .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
		{ .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
		{ .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
		{ .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
		  .name = "Mali-G71" },
		{ .id = GPU_ID2_PRODUCT_THEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
		  .name = "Mali-THEx" },
	};
	const char *product_name = "(Unknown Mali GPU)";
	struct kbase_device *kbdev;
	u32 gpu_id;
	unsigned product_id, product_id_mask;
	unsigned i;
	bool is_new_format;

	kbdev = pkbdev;
	if (!kbdev)
		return -ENODEV;

	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
	product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
	is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
	/* New-format IDs (GPU_ID2) compare only the product-model bits;
	 * legacy IDs compare the full product field. */
	product_id_mask =
		(is_new_format ?
			GPU_ID2_PRODUCT_MODEL :
			GPU_ID_VERSION_PRODUCT_ID) >>
		GPU_ID_VERSION_PRODUCT_ID_SHIFT;

	for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
		const struct gpu_product_id_name *p = &gpu_product_id_names[i];

		/* Only compare entries of the same ID format, masked to the
		 * relevant product bits. */
		if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
		    (p->id & product_id_mask) ==
		    (product_id & product_id_mask)) {
			product_name = p->name;
			break;
		}
	}

	return scnprintf(buf, PAGE_SIZE, "%s\n", product_name);
}
1819
1820 #if defined(CONFIG_MALI_DVFS) && defined(CONFIG_EXYNOS_THERMAL) && defined(CONFIG_GPU_THERMAL)
1821
1822 extern struct exynos_tmu_data *gpu_thermal_data;
1823
1824 static ssize_t show_kernel_sysfs_gpu_temp(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1825 {
1826 ssize_t ret = 0;
1827 int gpu_temp = 0;
1828 int gpu_temp_int = 0;
1829 int gpu_temp_point = 0;
1830
1831
1832 if (!gpu_thermal_data) {
1833 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "[Kernel group SYSFS] thermal driver does not ready\n");
1834 return -ENODEV;
1835 }
1836
1837 mutex_lock(&gpu_thermal_data->lock);
1838
1839 if (gpu_thermal_data->num_of_sensors)
1840 gpu_temp = gpu_thermal_data->tmu_read(gpu_thermal_data) * MCELSIUS;
1841
1842 mutex_unlock(&gpu_thermal_data->lock);
1843
1844 gpu_temp_int = gpu_temp / 1000;
1845 gpu_temp_point = gpu_temp % gpu_temp_int;
1846 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d.%d", gpu_temp_int, gpu_temp_point);
1847
1848 if (ret < PAGE_SIZE - 1) {
1849 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1850 } else {
1851 buf[PAGE_SIZE-2] = '\n';
1852 buf[PAGE_SIZE-1] = '\0';
1853 ret = PAGE_SIZE-1;
1854 }
1855
1856 return ret;
1857 }
1858
/*
 * kobj_attribute declarations for the kernel-group sysfs directory
 * ("gpu" kobject created in gpu_create_sysfs_file via external_kobj).
 * These mirror a subset of the device attributes above for platform tools.
 */
static struct kobj_attribute gpu_temp_attribute =
	__ATTR(gpu_tmu, S_IRUGO, show_kernel_sysfs_gpu_temp, NULL);
#endif

#ifdef CONFIG_MALI_DVFS
static struct kobj_attribute gpu_info_attribute =
	__ATTR(gpu_info, S_IRUGO, show_kernel_sysfs_gpu_info, NULL);

static struct kobj_attribute gpu_max_lock_attribute =
	__ATTR(gpu_max_clock, S_IRUGO|S_IWUSR, show_kernel_sysfs_max_lock_dvfs, set_kernel_sysfs_max_lock_dvfs);

static struct kobj_attribute gpu_min_lock_attribute =
	__ATTR(gpu_min_clock, S_IRUGO|S_IWUSR, show_kernel_sysfs_min_lock_dvfs, set_kernel_sysfs_min_lock_dvfs);
#endif /* #ifdef CONFIG_MALI_DVFS */

static struct kobj_attribute gpu_busy_attribute =
	__ATTR(gpu_busy, S_IRUGO, show_kernel_sysfs_utilization, NULL);

static struct kobj_attribute gpu_clock_attribute =
	__ATTR(gpu_clock, S_IRUGO, show_kernel_sysfs_clock, NULL);

static struct kobj_attribute gpu_freq_table_attribute =
	__ATTR(gpu_freq_table, S_IRUGO, show_kernel_sysfs_freq_table, NULL);

#ifdef CONFIG_MALI_DVFS
static struct kobj_attribute gpu_governor_attribute =
	__ATTR(gpu_governor, S_IRUGO|S_IWUSR, show_kernel_sysfs_governor, set_kernel_sysfs_governor);

static struct kobj_attribute gpu_available_governor_attribute =
	__ATTR(gpu_available_governor, S_IRUGO, show_kernel_sysfs_available_governor, NULL);
#endif /* #ifdef CONFIG_MALI_DVFS */

static struct kobj_attribute gpu_model_attribute =
	__ATTR(gpu_model, S_IRUGO, show_kernel_sysfs_gpu_model, NULL);


/* All kernel-group attributes registered as one sysfs group. */
static struct attribute *attrs[] = {
#ifdef CONFIG_MALI_DVFS
#if defined(CONFIG_EXYNOS_THERMAL) && defined(CONFIG_GPU_THERMAL)
	&gpu_temp_attribute.attr,
#endif
	&gpu_info_attribute.attr,
	&gpu_max_lock_attribute.attr,
	&gpu_min_lock_attribute.attr,
#endif /* #ifdef CONFIG_MALI_DVFS */
	&gpu_busy_attribute.attr,
	&gpu_clock_attribute.attr,
	&gpu_freq_table_attribute.attr,
#ifdef CONFIG_MALI_DVFS
	&gpu_governor_attribute.attr,
	&gpu_available_governor_attribute.attr,
#endif /* #ifdef CONFIG_MALI_DVFS */
	&gpu_model_attribute.attr,
	NULL,
};

static struct attribute_group attr_group = {
	.attrs = attrs,
};
/* Kobject under which the attribute group above is created. */
static struct kobject *external_kobj;
#endif
1920
1921 int gpu_create_sysfs_file(struct device *dev)
1922 {
1923 #ifdef CONFIG_MALI_DEBUG_KERNEL_SYSFS
1924 int retval = 0;
1925 #endif
1926
1927 if (device_create_file(dev, &dev_attr_clock)) {
1928 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [clock]\n");
1929 goto out;
1930 }
1931
1932 if (device_create_file(dev, &dev_attr_vol)) {
1933 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [vol]\n");
1934 goto out;
1935 }
1936
1937 if (device_create_file(dev, &dev_attr_power_state)) {
1938 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [power_state]\n");
1939 goto out;
1940 }
1941
1942 if (device_create_file(dev, &dev_attr_asv_table)) {
1943 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [asv_table]\n");
1944 goto out;
1945 }
1946
1947 if (device_create_file(dev, &dev_attr_dvfs_table)) {
1948 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_table]\n");
1949 goto out;
1950 }
1951
1952 if (device_create_file(dev, &dev_attr_time_in_state)) {
1953 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [time_in_state]\n");
1954 goto out;
1955 }
1956
1957 if (device_create_file(dev, &dev_attr_utilization)) {
1958 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [utilization]\n");
1959 goto out;
1960 }
1961
1962 if (device_create_file(dev, &dev_attr_perf)) {
1963 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [perf]\n");
1964 goto out;
1965 }
1966 #ifdef CONFIG_MALI_DVFS
1967 if (device_create_file(dev, &dev_attr_dvfs)) {
1968 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs]\n");
1969 goto out;
1970 }
1971
1972 if (device_create_file(dev, &dev_attr_dvfs_governor)) {
1973 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_governor]\n");
1974 goto out;
1975 }
1976
1977 if (device_create_file(dev, &dev_attr_dvfs_max_lock_status)) {
1978 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_max_lock_status]\n");
1979 goto out;
1980 }
1981
1982 if (device_create_file(dev, &dev_attr_dvfs_min_lock_status)) {
1983 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_min_lock_status]\n");
1984 goto out;
1985 }
1986
1987 if (device_create_file(dev, &dev_attr_dvfs_max_lock)) {
1988 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_max_lock]\n");
1989 goto out;
1990 }
1991
1992 if (device_create_file(dev, &dev_attr_dvfs_min_lock)) {
1993 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_min_lock]\n");
1994 goto out;
1995 }
1996
1997 if (device_create_file(dev, &dev_attr_down_staycount)) {
1998 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [down_staycount]\n");
1999 goto out;
2000 }
2001
2002 if (device_create_file(dev, &dev_attr_highspeed_clock)) {
2003 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [highspeed_clock]\n");
2004 goto out;
2005 }
2006
2007 if (device_create_file(dev, &dev_attr_highspeed_load)) {
2008 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [highspeed_load]\n");
2009 goto out;
2010 }
2011
2012 if (device_create_file(dev, &dev_attr_highspeed_delay)) {
2013 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [highspeed_delay]\n");
2014 goto out;
2015 }
2016
2017 if (device_create_file(dev, &dev_attr_wakeup_lock)) {
2018 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [wakeup_lock]\n");
2019 goto out;
2020 }
2021
2022 if (device_create_file(dev, &dev_attr_polling_speed)) {
2023 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [polling_speed]\n");
2024 goto out;
2025 }
2026
2027 if (device_create_file(dev, &dev_attr_tmu)) {
2028 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [tmu]\n");
2029 goto out;
2030 }
2031 #ifdef CONFIG_CPU_THERMAL_IPA
2032 if (device_create_file(dev, &dev_attr_norm_utilization)) {
2033 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [norm_utilization]\n");
2034 goto out;
2035 }
2036
2037 if (device_create_file(dev, &dev_attr_utilization_stats)) {
2038 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [utilization_stats]\n");
2039 goto out;
2040 }
2041 #endif /* CONFIG_CPU_THERMAL_IPA */
2042 #endif /* CONFIG_MALI_DVFS */
2043 if (device_create_file(dev, &dev_attr_debug_level)) {
2044 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [debug_level]\n");
2045 goto out;
2046 }
2047 #ifdef CONFIG_MALI_EXYNOS_TRACE
2048 if (device_create_file(dev, &dev_attr_trace_level)) {
2049 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [trace_level]\n");
2050 goto out;
2051 }
2052
2053 if (device_create_file(dev, &dev_attr_trace_dump)) {
2054 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [trace_dump]\n");
2055 goto out;
2056 }
2057 #endif /* CONFIG_MALI_EXYNOS_TRACE */
2058 #ifdef DEBUG_FBDEV
2059 if (device_create_file(dev, &dev_attr_fbdev)) {
2060 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [fbdev]\n");
2061 goto out;
2062 }
2063 #endif
2064
2065 if (device_create_file(dev, &dev_attr_gpu_status)) {
2066 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [gpu_status]\n");
2067 goto out;
2068 }
2069
2070 #ifdef CONFIG_MALI_VK_BOOST
2071 if (device_create_file(dev, &dev_attr_vk_boost_status)) {
2072 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [vk_boost_status]\n");
2073 goto out;
2074 }
2075 #endif
2076
2077 #ifdef CONFIG_MALI_SUSTAINABLE_OPT
2078 if (device_create_file(dev, &dev_attr_sustainable_status)) {
2079 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [sustainable_status]\n");
2080 goto out;
2081 }
2082 #endif
2083
2084 #ifdef CONFIG_MALI_SEC_CL_BOOST
2085 if (device_create_file(dev, &dev_attr_cl_boost_disable)) {
2086 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [cl_boost_disable]\n");
2087 goto out;
2088 }
2089 #endif
2090
2091 #ifdef CONFIG_MALI_DEBUG_KERNEL_SYSFS
2092 external_kobj = kobject_create_and_add("gpu", kernel_kobj);
2093 if (!external_kobj) {
2094 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create Kobj for group [KERNEL - GPU]\n");
2095 goto out;
2096 }
2097
2098 retval = sysfs_create_group(external_kobj, &attr_group);
2099 if (retval) {
2100 kobject_put(external_kobj);
2101 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't add sysfs group [KERNEL - GPU]\n");
2102 goto out;
2103 }
2104 #endif
2105
2106 return 0;
2107 out:
2108 return -ENOENT;
2109 }
2110
2111 void gpu_remove_sysfs_file(struct device *dev)
2112 {
2113 device_remove_file(dev, &dev_attr_clock);
2114 device_remove_file(dev, &dev_attr_vol);
2115 device_remove_file(dev, &dev_attr_power_state);
2116 device_remove_file(dev, &dev_attr_asv_table);
2117 device_remove_file(dev, &dev_attr_dvfs_table);
2118 device_remove_file(dev, &dev_attr_time_in_state);
2119 device_remove_file(dev, &dev_attr_utilization);
2120 device_remove_file(dev, &dev_attr_perf);
2121 #ifdef CONFIG_MALI_DVFS
2122 device_remove_file(dev, &dev_attr_dvfs);
2123 device_remove_file(dev, &dev_attr_dvfs_governor);
2124 device_remove_file(dev, &dev_attr_dvfs_max_lock_status);
2125 device_remove_file(dev, &dev_attr_dvfs_min_lock_status);
2126 device_remove_file(dev, &dev_attr_dvfs_max_lock);
2127 device_remove_file(dev, &dev_attr_dvfs_min_lock);
2128 device_remove_file(dev, &dev_attr_down_staycount);
2129 device_remove_file(dev, &dev_attr_highspeed_clock);
2130 device_remove_file(dev, &dev_attr_highspeed_load);
2131 device_remove_file(dev, &dev_attr_highspeed_delay);
2132 device_remove_file(dev, &dev_attr_wakeup_lock);
2133 device_remove_file(dev, &dev_attr_polling_speed);
2134 device_remove_file(dev, &dev_attr_tmu);
2135 #ifdef CONFIG_CPU_THERMAL_IPA
2136 device_remove_file(dev, &dev_attr_norm_utilization);
2137 device_remove_file(dev, &dev_attr_utilization_stats);
2138 #endif /* CONFIG_CPU_THERMAL_IPA */
2139 #endif /* CONFIG_MALI_DVFS */
2140 device_remove_file(dev, &dev_attr_debug_level);
2141 #ifdef CONFIG_MALI_EXYNOS_TRACE
2142 device_remove_file(dev, &dev_attr_trace_level);
2143 device_remove_file(dev, &dev_attr_trace_dump);
2144 #endif /* CONFIG_MALI_EXYNOS_TRACE */
2145 #ifdef DEBUG_FBDEV
2146 device_remove_file(dev, &dev_attr_fbdev);
2147 #endif
2148 device_remove_file(dev, &dev_attr_gpu_status);
2149 #ifdef CONFIG_MALI_VK_BOOST
2150 device_remove_file(dev, &dev_attr_vk_boost_status);
2151 #endif
2152 #ifdef CONFIG_MALI_SUSTAINABLE_OPT
2153 device_remove_file(dev, &dev_attr_sustainable_status);
2154 #endif
2155 #ifdef CONFIG_MALI_SEC_CL_BOOST
2156 device_remove_file(dev, &dev_attr_cl_boost_disable);
2157 #endif
2158 #ifdef CONFIG_MALI_DEBUG_KERNEL_SYSFS
2159 kobject_put(external_kobj);
2160 #endif
2161 }