/* 856f3890d02718dede2637c9ea41f3187e3f8a23
 * [GitHub/exynos8895/android_kernel_samsung_universal8895.git]
 * drivers/gpu/arm/b_r16p0/platform/exynos/gpu_custom_interface.c
 */
1 /* drivers/gpu/arm/.../platform/gpu_custom_interface.c
2 *
3 * Copyright 2011 by S.LSI. Samsung Electronics Inc.
4 * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
5 *
6 * Samsung SoC Mali-T Series DVFS driver
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software FoundatIon.
11 */
12
13 /**
14 * @file gpu_custom_interface.c
15 * DVFS
16 */
17
18 #include <mali_kbase.h>
19
20 #include <linux/fb.h>
21
22 #if defined(CONFIG_MALI_DVFS) && defined(CONFIG_EXYNOS_THERMAL) && defined(CONFIG_GPU_THERMAL)
23 #include "exynos_tmu.h"
24 #endif
25
26 #include "mali_kbase_platform.h"
27 #include "gpu_dvfs_handler.h"
28 #include "gpu_dvfs_governor.h"
29 #include "gpu_control.h"
30 #ifdef CONFIG_CPU_THERMAL_IPA
31 #include "gpu_ipa.h"
32 #endif /* CONFIG_CPU_THERMAL_IPA */
33 #include "gpu_custom_interface.h"
34
35 #ifdef CONFIG_MALI_RT_PM
36 #include <soc/samsung/exynos-pd.h>
37 #endif
38
39 extern struct kbase_device *pkbdev;
40
/* Apply or release the PMQoS-driven DVFS minimum-frequency lock.
 * @level: DVFS table level; a level with no valid clock releases the lock.
 * Returns 0 on success, -ENODEV when the platform context is missing.
 */
int gpu_pmqos_dvfs_min_lock(int level)
{
#ifdef CONFIG_MALI_DVFS
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
	int freq;

	if (platform == NULL) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: platform context is not initialized\n", __func__);
		return -ENODEV;
	}

	freq = gpu_dvfs_get_clock(level);
	if (freq >= 0)
		gpu_dvfs_clock_lock(GPU_DVFS_MIN_LOCK, PMQOS_LOCK, freq);
	else
		gpu_dvfs_clock_lock(GPU_DVFS_MIN_UNLOCK, PMQOS_LOCK, 0);
#endif /* CONFIG_MALI_DVFS */
	return 0;
}
60
/* sysfs show: current GPU clock, or 0 when the GPU is off / DVS is active.
 * The clock is read only under the lock that guards power state, so the
 * hardware access cannot race with a power transition.
 */
static ssize_t show_clock(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	int clock = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

#ifdef CONFIG_MALI_RT_PM
	/* runtime-PM build: serialize against the Exynos power-domain state */
	if (platform->exynos_pm_domain) {
		mutex_lock(&platform->exynos_pm_domain->access_lock);
		/* touch the hardware only while DVS is off and power is on */
		if(!platform->dvs_is_enabled && gpu_is_power_on())
			clock = gpu_get_cur_clock(platform);
		mutex_unlock(&platform->exynos_pm_domain->access_lock);
	}
#else
	if (gpu_control_is_power_on(pkbdev) == 1) {
		mutex_lock(&platform->gpu_clock_lock);
		if (!platform->dvs_is_enabled)
			clock = gpu_get_cur_clock(platform);
		mutex_unlock(&platform->gpu_clock_lock);
	}
#endif

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", clock);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
98
99 static ssize_t set_clock(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
100 {
101 unsigned int clk = 0;
102 int ret, i, policy_count;
103 static bool cur_state;
104 const struct kbase_pm_policy *const *policy_list;
105 static const struct kbase_pm_policy *prev_policy;
106 static bool prev_tmu_status = true;
107 #ifdef CONFIG_MALI_DVFS
108 static bool prev_dvfs_status = true;
109 #endif /* CONFIG_MALI_DVFS */
110 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
111
112 if (!platform)
113 return -ENODEV;
114
115 ret = kstrtoint(buf, 0, &clk);
116 if (ret) {
117 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
118 return -ENOENT;
119 }
120
121 if (!cur_state) {
122 prev_tmu_status = platform->tmu_status;
123 #ifdef CONFIG_MALI_DVFS
124 prev_dvfs_status = platform->dvfs_status;
125 #endif /* CONFIG_MALI_DVFS */
126 prev_policy = kbase_pm_get_policy(pkbdev);
127 }
128
129 if (clk == 0) {
130 kbase_pm_set_policy(pkbdev, prev_policy);
131 platform->tmu_status = prev_tmu_status;
132 #ifdef CONFIG_MALI_DVFS
133 if (!platform->dvfs_status)
134 gpu_dvfs_on_off(true);
135 #endif /* CONFIG_MALI_DVFS */
136 cur_state = false;
137 } else {
138 policy_count = kbase_pm_list_policies(&policy_list);
139 for (i = 0; i < policy_count; i++) {
140 if (sysfs_streq(policy_list[i]->name, "always_on")) {
141 kbase_pm_set_policy(pkbdev, policy_list[i]);
142 break;
143 }
144 }
145 platform->tmu_status = false;
146 #ifdef CONFIG_MALI_DVFS
147 if (platform->dvfs_status)
148 gpu_dvfs_on_off(false);
149 #endif /* CONFIG_MALI_DVFS */
150 gpu_set_target_clk_vol(clk, false);
151 cur_state = true;
152 }
153
154 return count;
155 }
156
157 static ssize_t show_vol(struct device *dev, struct device_attribute *attr, char *buf)
158 {
159 ssize_t ret = 0;
160 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
161
162 if (!platform)
163 return -ENODEV;
164
165 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", gpu_get_cur_voltage(platform));
166
167 if (ret < PAGE_SIZE - 1) {
168 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
169 } else {
170 buf[PAGE_SIZE-2] = '\n';
171 buf[PAGE_SIZE-1] = '\0';
172 ret = PAGE_SIZE-1;
173 }
174
175 return ret;
176 }
177
178 static ssize_t show_power_state(struct device *dev, struct device_attribute *attr, char *buf)
179 {
180 ssize_t ret = 0;
181 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
182
183 if (!platform)
184 return -ENODEV;
185
186 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", gpu_control_is_power_on(pkbdev));
187
188 if (ret < PAGE_SIZE - 1) {
189 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
190 } else {
191 buf[PAGE_SIZE-2] = '\n';
192 buf[PAGE_SIZE-1] = '\0';
193 ret = PAGE_SIZE-1;
194 }
195
196 return ret;
197 }
198
199 static int gpu_get_asv_table(struct exynos_context *platform, char *buf, size_t buf_size)
200 {
201 int i, cnt = 0;
202
203 if (!platform)
204 return -ENODEV;
205
206 if (buf == NULL)
207 return 0;
208
209 cnt += snprintf(buf+cnt, buf_size-cnt, "GPU, vol, min, max, down_stay, mif, cpu0, cpu1\n");
210
211 for (i = gpu_dvfs_get_level(platform->gpu_max_clock); i <= gpu_dvfs_get_level(platform->gpu_min_clock); i++) {
212 cnt += snprintf(buf+cnt, buf_size-cnt, "%d, %7d, %2d, %3d, %d, %7d, %7d, %7d\n",
213 platform->table[i].clock, platform->table[i].voltage, platform->table[i].min_threshold,
214 platform->table[i].max_threshold, platform->table[i].down_staycount, platform->table[i].mem_freq,
215 platform->table[i].cpu_little_min_freq, platform->table[i].cpu_middle_min_freq);
216 }
217
218 return cnt;
219 }
220
/* sysfs show: dump the ASV/DVFS table via gpu_get_asv_table(). */
static ssize_t show_asv_table(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret += gpu_get_asv_table(platform, buf+ret, (size_t)PAGE_SIZE-ret);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
241
242 static int gpu_get_dvfs_table(struct exynos_context *platform, char *buf, size_t buf_size)
243 {
244 int i, cnt = 0;
245
246 if (!platform)
247 return -ENODEV;
248
249 if (buf == NULL)
250 return 0;
251
252 for (i = gpu_dvfs_get_level(platform->gpu_max_clock); i <= gpu_dvfs_get_level(platform->gpu_min_clock); i++)
253 cnt += snprintf(buf+cnt, buf_size-cnt, " %d", platform->table[i].clock);
254
255 cnt += snprintf(buf+cnt, buf_size-cnt, "\n");
256
257 return cnt;
258 }
259
/* sysfs show: list the supported DVFS clocks via gpu_get_dvfs_table(). */
static ssize_t show_dvfs_table(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret += gpu_get_dvfs_table(platform, buf+ret, (size_t)PAGE_SIZE-ret);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
280
/* sysfs show: accumulated residency time per DVFS clock.
 * The update call multiplies by gpu_control_is_power_on(), so while the
 * GPU is off the elapsed interval is accounted to clock 0. */
static ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	int i;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	gpu_dvfs_update_time_in_state(gpu_control_is_power_on(pkbdev) * platform->cur_clock);

	/* print from the min-clock level down to the max-clock level,
	 * i.e. lowest frequency first */
	for (i = gpu_dvfs_get_level(platform->gpu_min_clock); i >= gpu_dvfs_get_level(platform->gpu_max_clock); i--) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d %llu\n",
				platform->table[i].clock,
				platform->table[i].time);
	}

	/* force-terminate if the page filled up */
	if (ret >= PAGE_SIZE - 1) {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
306
/* sysfs store: any write resets the per-clock residency statistics. */
static ssize_t set_time_in_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	gpu_dvfs_init_time_in_state();

	return count;
}
313
/* sysfs show: current GPU utilization; reports 0 while powered off
 * (utilization is multiplied by the power-on state). */
static ssize_t show_utilization(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", gpu_control_is_power_on(pkbdev) * platform->env_data.utilization);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
334
/* sysfs show: current perf counter value; reports 0 while powered off
 * (perf is multiplied by the power-on state). */
static ssize_t show_perf(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", gpu_control_is_power_on(pkbdev) * platform->env_data.perf);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
355
356 #ifdef CONFIG_MALI_DVFS
/* sysfs show: DVFS enable status (1 = enabled, 0 = disabled). */
static ssize_t show_dvfs(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->dvfs_status);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
377
378 static ssize_t set_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
379 {
380 if (sysfs_streq("0", buf))
381 gpu_dvfs_on_off(false);
382 else if (sysfs_streq("1", buf))
383 gpu_dvfs_on_off(true);
384
385 return count;
386 }
387
/* sysfs show: list all available DVFS governors, then the current one. */
static ssize_t show_governor(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	gpu_dvfs_governor_info *governor_info;
	int i;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	governor_info = (gpu_dvfs_governor_info *)gpu_dvfs_get_governor_info();

	/* one governor name per line */
	for (i = 0; i < G3D_MAX_GOVERNOR_NUM; i++)
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "%s\n", governor_info[i].name);

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "[Current Governor] %s", governor_info[platform->governor_type].name);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
415
416 static ssize_t set_governor(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
417 {
418 int ret;
419 int next_governor_type;
420 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
421
422 if (!platform)
423 return -ENODEV;
424
425 ret = kstrtoint(buf, 0, &next_governor_type);
426
427 if ((next_governor_type < 0) || (next_governor_type >= G3D_MAX_GOVERNOR_NUM)) {
428 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
429 return -ENOENT;
430 }
431
432 ret = gpu_dvfs_governor_change(next_governor_type);
433
434 if (ret < 0) {
435 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u,
436 "%s: fail to set the new governor (%d)\n", __func__, next_governor_type);
437 return -ENOENT;
438 }
439
440 return count;
441 }
442
/* sysfs show: per-source max-lock values as "[source:clock]" pairs.
 * The array is copied under the DVFS spinlock so the printed snapshot
 * is internally consistent. */
static ssize_t show_max_lock_status(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	unsigned long flags;
	int i;
	int max_lock_status[NUMBER_LOCK];
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	for (i = 0; i < NUMBER_LOCK; i++)
		max_lock_status[i] = platform->user_max_lock[i];
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	for (i = 0; i < NUMBER_LOCK; i++)
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%d:%d]", i, max_lock_status[i]);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
472
/* sysfs show: per-source min-lock values as "[source:clock]" pairs.
 * The array is copied under the DVFS spinlock so the printed snapshot
 * is internally consistent. */
static ssize_t show_min_lock_status(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	unsigned long flags;
	int i;
	int min_lock_status[NUMBER_LOCK];
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	for (i = 0; i < NUMBER_LOCK; i++)
		min_lock_status[i] = platform->user_min_lock[i];
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	for (i = 0; i < NUMBER_LOCK; i++)
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%d:%d]", i, min_lock_status[i]);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
502
/* sysfs show: "effective max lock / user sysfs request", or "-1" when
 * no max lock is active. Values are read under the DVFS spinlock. */
static ssize_t show_max_lock_dvfs(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	unsigned long flags;
	int locked_clock = -1;
	int user_locked_clock = -1;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	locked_clock = platform->max_lock;
	user_locked_clock = platform->user_max_lock_input;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	if (locked_clock > 0)
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d / %d", locked_clock, user_locked_clock);
	else
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "-1");

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
534
/* sysfs store: set or clear the sysfs-sourced maximum-frequency lock.
 * "0" clears the lock; any other value is parsed as a clock request. */
static ssize_t set_max_lock_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	int ret, clock = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	if (sysfs_streq("0", buf)) {
		platform->user_max_lock_input = 0;
		gpu_dvfs_clock_lock(GPU_DVFS_MAX_UNLOCK, SYSFS_LOCK, 0);
	} else {
		ret = kstrtoint(buf, 0, &clock);
		if (ret) {
			GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
			return -ENOENT;
		}

		platform->user_max_lock_input = clock;

		/* snap the request to the nearest supported DVFS step */
		clock = gpu_dvfs_get_level_clock(clock);

		ret = gpu_dvfs_get_level(clock);
		if ((ret < gpu_dvfs_get_level(platform->gpu_max_clock)) || (ret > gpu_dvfs_get_level(platform->gpu_min_clock))) {
			GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, clock);
			return -ENOENT;
		}

		/* a max lock at the table maximum is equivalent to no lock */
		if (clock == platform->gpu_max_clock)
			gpu_dvfs_clock_lock(GPU_DVFS_MAX_UNLOCK, SYSFS_LOCK, 0);
		else
			gpu_dvfs_clock_lock(GPU_DVFS_MAX_LOCK, SYSFS_LOCK, clock);
	}

	return count;
}
571
/* sysfs show: "effective min lock / user sysfs request", or "-1" when
 * no min lock is active. Values are read under the DVFS spinlock. */
static ssize_t show_min_lock_dvfs(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	unsigned long flags;
	int locked_clock = -1;
	int user_locked_clock = -1;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	locked_clock = platform->min_lock;
	user_locked_clock = platform->user_min_lock_input;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	if (locked_clock > 0)
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d / %d", locked_clock, user_locked_clock);
	else
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "-1");

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
603
/* sysfs store: set or clear the sysfs-sourced minimum-frequency lock.
 * "0" clears the lock; any other value is parsed as a clock request,
 * validated against the table and clamped to gpu_max_clock_limit. */
static ssize_t set_min_lock_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	int ret, clock = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	if (sysfs_streq("0", buf)) {
		platform->user_min_lock_input = 0;
		gpu_dvfs_clock_lock(GPU_DVFS_MIN_UNLOCK, SYSFS_LOCK, 0);
	} else {
		ret = kstrtoint(buf, 0, &clock);
		if (ret) {
			GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
			return -ENOENT;
		}

		platform->user_min_lock_input = clock;

		/* snap the request to the nearest supported DVFS step */
		clock = gpu_dvfs_get_level_clock(clock);

		ret = gpu_dvfs_get_level(clock);
		if ((ret < gpu_dvfs_get_level(platform->gpu_max_clock)) || (ret > gpu_dvfs_get_level(platform->gpu_min_clock))) {
			GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, clock);
			return -ENOENT;
		}

		/* never pin the floor above the hardware clock limit */
		if (clock > platform->gpu_max_clock_limit)
			clock = platform->gpu_max_clock_limit;

		/* a min lock at the table minimum is equivalent to no lock */
		if (clock == platform->gpu_min_clock)
			gpu_dvfs_clock_lock(GPU_DVFS_MIN_UNLOCK, SYSFS_LOCK, 0);
		else
			gpu_dvfs_clock_lock(GPU_DVFS_MIN_LOCK, SYSFS_LOCK, clock);
	}

	return count;
}
643
/* sysfs show: down_staycount for every DVFS level, printed while holding
 * the DVFS spinlock so the table cannot change mid-dump. */
static ssize_t show_down_staycount(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	unsigned long flags;
	int i = -1;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	for (i = gpu_dvfs_get_level(platform->gpu_max_clock); i <= gpu_dvfs_get_level(platform->gpu_min_clock); i++)
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "Clock %d - %d\n",
			platform->table[i].clock, platform->table[i].down_staycount);
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
670
/* Valid range for a level's down_staycount (DVFS ticks to stay before
 * stepping the clock down). */
#define MIN_DOWN_STAYCOUNT	1
#define MAX_DOWN_STAYCOUNT	10
/* sysfs store: "<clock><sep><staycount>" (separator: space or comma)
 * sets the down_staycount of the DVFS level matching <clock>. */
static ssize_t set_down_staycount(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long flags;
	char tmpbuf[32];
	char *sptr, *tok;
	int ret = -1;
	int clock = -1, level = -1, down_staycount = 0;
	unsigned int len = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	/* copy into a bounded, NUL-terminated scratch buffer for strsep() */
	len = (unsigned int)min(count, sizeof(tmpbuf) - 1);
	memcpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	sptr = tmpbuf;

	/* first token: clock */
	tok = strsep(&sptr, " ,");
	if (tok == NULL) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid input\n", __func__);
		return -ENOENT;
	}

	ret = kstrtoint(tok, 0, &clock);
	if (ret) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid input %d\n", __func__, clock);
		return -ENOENT;
	}

	/* second token: staycount */
	tok = strsep(&sptr, " ,");
	if (tok == NULL) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid input\n", __func__);
		return -ENOENT;
	}

	ret = kstrtoint(tok, 0, &down_staycount);
	if (ret) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid input %d\n", __func__, down_staycount);
		return -ENOENT;
	}

	level = gpu_dvfs_get_level(clock);
	if (level < 0) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, clock);
		return -ENOENT;
	}

	if ((down_staycount < MIN_DOWN_STAYCOUNT) || (down_staycount > MAX_DOWN_STAYCOUNT)) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: down_staycount is out of range (%d, %d ~ %d)\n",
			__func__, down_staycount, MIN_DOWN_STAYCOUNT, MAX_DOWN_STAYCOUNT);
		return -ENOENT;
	}

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	platform->table[level].down_staycount = down_staycount;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	return count;
}
733
/* sysfs show: interactive governor's highspeed clock, read under the
 * DVFS spinlock. */
static ssize_t show_highspeed_clock(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	unsigned long flags;
	int highspeed_clock = -1;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	highspeed_clock = platform->interactive.highspeed_clock;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", highspeed_clock);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
760
/* sysfs store: set the interactive governor's highspeed clock.
 * The value must map to a valid table level and is clamped to
 * gpu_max_clock_limit before being stored under the DVFS spinlock. */
static ssize_t set_highspeed_clock(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	ssize_t ret = 0;
	unsigned long flags;
	int highspeed_clock = -1;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret = kstrtoint(buf, 0, &highspeed_clock);
	if (ret) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
		return -ENOENT;
	}

	ret = gpu_dvfs_get_level(highspeed_clock);
	if ((ret < gpu_dvfs_get_level(platform->gpu_max_clock)) || (ret > gpu_dvfs_get_level(platform->gpu_min_clock))) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, highspeed_clock);
		return -ENOENT;
	}

	if (highspeed_clock > platform->gpu_max_clock_limit)
		highspeed_clock = platform->gpu_max_clock_limit;

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	platform->interactive.highspeed_clock = highspeed_clock;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	return count;
}
792
/* sysfs show: interactive governor's highspeed load threshold, read
 * under the DVFS spinlock. */
static ssize_t show_highspeed_load(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	unsigned long flags;
	int highspeed_load = -1;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	highspeed_load = platform->interactive.highspeed_load;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", highspeed_load);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
819
/* sysfs store: set the interactive governor's highspeed load threshold
 * (valid range 0..100, stored under the DVFS spinlock). */
static ssize_t set_highspeed_load(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	ssize_t ret = 0;
	unsigned long flags;
	int highspeed_load = -1;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret = kstrtoint(buf, 0, &highspeed_load);
	if (ret) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
		return -ENOENT;
	}

	if ((highspeed_load < 0) || (highspeed_load > 100)) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid load value (%d)\n", __func__, highspeed_load);
		return -ENOENT;
	}

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	platform->interactive.highspeed_load = highspeed_load;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	return count;
}
847
/* sysfs show: interactive governor's highspeed delay, read under the
 * DVFS spinlock. */
static ssize_t show_highspeed_delay(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	unsigned long flags;
	int highspeed_delay = -1;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	highspeed_delay = platform->interactive.highspeed_delay;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", highspeed_delay);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
874
875 static ssize_t set_highspeed_delay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
876 {
877 ssize_t ret = 0;
878 unsigned long flags;
879 int highspeed_delay = -1;
880 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
881
882 if (!platform)
883 return -ENODEV;
884
885 ret = kstrtoint(buf, 0, &highspeed_delay);
886 if (ret) {
887 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
888 return -ENOENT;
889 }
890
891 if ((highspeed_delay < 0) || (highspeed_delay > 5)) {
892 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid load value (%d)\n", __func__, highspeed_delay);
893 return -ENOENT;
894 }
895
896 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
897 platform->interactive.highspeed_delay = highspeed_delay;
898 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
899
900 return count;
901 }
902
/* sysfs show: wakeup-lock flag (1 = enabled, 0 = disabled). */
static ssize_t show_wakeup_lock(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->wakeup_lock);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
923
924 static ssize_t set_wakeup_lock(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
925 {
926 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
927
928 if (!platform)
929 return -ENODEV;
930
931 if (sysfs_streq("0", buf))
932 platform->wakeup_lock = false;
933 else if (sysfs_streq("1", buf))
934 platform->wakeup_lock = true;
935 else
936 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid val - only [0 or 1] is available\n", __func__);
937
938 return count;
939 }
940
/* sysfs show: DVFS polling interval value. */
static ssize_t show_polling_speed(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->polling_speed);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
961
/* sysfs store: set the DVFS polling interval; accepted range 100..1000. */
static ssize_t set_polling_speed(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	int ret, polling_speed;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret = kstrtoint(buf, 0, &polling_speed);

	if (ret) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
		return -ENOENT;
	}

	if ((polling_speed < 100) || (polling_speed > 1000)) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: out of range [100~1000] (%d)\n", __func__, polling_speed);
		return -ENOENT;
	}

	platform->polling_speed = polling_speed;

	return count;
}
986
/* sysfs show: thermal-management (TMU) control status. */
static ssize_t show_tmu(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->tmu_status);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
1007
/* sysfs store: enable ("1") or disable ("0") TMU control.
 * Disabling also clears the thermal voltage margin (re-applying the
 * current clock/voltage) and drops the TMU-sourced max-frequency lock. */
static ssize_t set_tmu_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	if (sysfs_streq("0", buf)) {
		if (platform->voltage_margin != 0) {
			platform->voltage_margin = 0;
			gpu_set_target_clk_vol(platform->cur_clock, false);
		}
		gpu_dvfs_clock_lock(GPU_DVFS_MAX_UNLOCK, TMU_LOCK, 0);
		platform->tmu_status = false;
	} else if (sysfs_streq("1", buf))
		platform->tmu_status = true;
	else
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value - only [0 or 1] is available\n", __func__);

	return count;
}
1029
1030 #ifdef CONFIG_CPU_THERMAL_IPA
/* sysfs show (IPA): normalized GPU utilisation from the IPA layer.
 * Only functional when CONFIG_EXYNOS_THERMAL is also enabled; otherwise
 * it logs a warning and returns an empty buffer. */
static ssize_t show_norm_utilization(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
#ifdef CONFIG_EXYNOS_THERMAL

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", gpu_ipa_dvfs_get_norm_utilisation(pkbdev));

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}
#else
	GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: EXYNOS THERMAL build config is disabled\n", __func__);
#endif /* CONFIG_EXYNOS_THERMAL */

	return ret;
}
1051
/* sysfs show (IPA): one-line dump of the IPA utilisation statistics.
 * Only functional when CONFIG_EXYNOS_THERMAL is also enabled; otherwise
 * it logs a warning and returns an empty buffer. */
static ssize_t show_utilization_stats(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
#ifdef CONFIG_EXYNOS_THERMAL
	struct mali_debug_utilisation_stats stats;

	gpu_ipa_dvfs_get_utilisation_stats(&stats);

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "util=%d norm_util=%d norm_freq=%d time_busy=%u time_idle=%u time_tick=%d",
			stats.s.utilisation, stats.s.norm_utilisation,
			stats.s.freq_for_norm, stats.time_busy, stats.time_idle,
			stats.time_tick);

	/* append a trailing newline; force-terminate if the page is full */
	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}
#else
	GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: EXYNOS THERMAL build config is disabled\n", __func__);
#endif /* CONFIG_EXYNOS_THERMAL */

	return ret;
}
1078 #endif /* CONFIG_CPU_THERMAL_IPA */
1079 #endif /* CONFIG_MALI_DVFS */
1080
1081 static ssize_t show_debug_level(struct device *dev, struct device_attribute *attr, char *buf)
1082 {
1083 ssize_t ret = 0;
1084
1085 ret += snprintf(buf+ret, PAGE_SIZE-ret, "[Current] %d (%d ~ %d)",
1086 gpu_get_debug_level(), DVFS_DEBUG_START+1, DVFS_DEBUG_END-1);
1087
1088 if (ret < PAGE_SIZE - 1) {
1089 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1090 } else {
1091 buf[PAGE_SIZE-2] = '\n';
1092 buf[PAGE_SIZE-1] = '\0';
1093 ret = PAGE_SIZE-1;
1094 }
1095
1096 return ret;
1097 }
1098
1099 static ssize_t set_debug_level(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1100 {
1101 int debug_level, ret;
1102
1103 ret = kstrtoint(buf, 0, &debug_level);
1104 if (ret) {
1105 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
1106 return -ENOENT;
1107 }
1108
1109 if ((debug_level <= DVFS_DEBUG_START) || (debug_level >= DVFS_DEBUG_END)) {
1110 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid debug level (%d)\n", __func__, debug_level);
1111 return -ENOENT;
1112 }
1113
1114 gpu_set_debug_level(debug_level);
1115
1116 return count;
1117 }
1118
1119 #ifdef CONFIG_MALI_EXYNOS_TRACE
1120 static ssize_t show_trace_level(struct device *dev, struct device_attribute *attr, char *buf)
1121 {
1122 ssize_t ret = 0;
1123 int level;
1124
1125 for (level = TRACE_NONE + 1; level < TRACE_END - 1; level++)
1126 if (gpu_check_trace_level(level))
1127 ret += snprintf(buf+ret, PAGE_SIZE-ret, "<%d> ", level);
1128 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\nList: %d ~ %d\n(None: %d, All: %d)",
1129 TRACE_NONE + 1, TRACE_ALL - 1, TRACE_NONE, TRACE_ALL);
1130
1131 if (ret < PAGE_SIZE - 1) {
1132 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1133 } else {
1134 buf[PAGE_SIZE-2] = '\n';
1135 buf[PAGE_SIZE-1] = '\0';
1136 ret = PAGE_SIZE-1;
1137 }
1138
1139 return ret;
1140 }
1141
1142 static ssize_t set_trace_level(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1143 {
1144 int trace_level, ret;
1145
1146 ret = kstrtoint(buf, 0, &trace_level);
1147 if (ret) {
1148 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
1149 return -ENOENT;
1150 }
1151
1152 if ((trace_level <= TRACE_START) || (trace_level >= TRACE_END)) {
1153 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid trace level (%d)\n", __func__, trace_level);
1154 return -ENOENT;
1155 }
1156
1157 gpu_set_trace_level(trace_level);
1158
1159 return count;
1160 }
1161
1162 extern void kbasep_trace_format_msg(struct kbase_trace *trace_msg, char *buffer, int len);
1163 static ssize_t show_trace_dump(struct device *dev, struct device_attribute *attr, char *buf)
1164 {
1165 ssize_t ret = 0;
1166 unsigned long flags;
1167 u32 start, end;
1168
1169 spin_lock_irqsave(&pkbdev->trace_lock, flags);
1170 start = pkbdev->trace_first_out;
1171 end = pkbdev->trace_next_in;
1172
1173 while (start != end) {
1174 char buffer[KBASE_TRACE_SIZE];
1175 struct kbase_trace *trace_msg = &pkbdev->trace_rbuf[start];
1176
1177 kbasep_trace_format_msg(trace_msg, buffer, KBASE_TRACE_SIZE);
1178 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%s\n", buffer);
1179 start = (start + 1) & KBASE_TRACE_MASK;
1180 }
1181
1182 spin_unlock_irqrestore(&pkbdev->trace_lock, flags);
1183 KBASE_TRACE_CLEAR(pkbdev);
1184
1185 if (ret < PAGE_SIZE - 1) {
1186 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1187 } else {
1188 buf[PAGE_SIZE-2] = '\n';
1189 buf[PAGE_SIZE-1] = '\0';
1190 ret = PAGE_SIZE-1;
1191 }
1192
1193 return ret;
1194 }
1195
/* sysfs store for "trace_dump": any write resets the kbase trace ring buffer;
 * the written value itself is ignored. */
static ssize_t init_trace_dump(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	KBASE_TRACE_CLEAR(pkbdev);

	return count;
}
1202 #endif /* CONFIG_MALI_EXYNOS_TRACE */
1203
1204 #ifdef DEBUG_FBDEV
1205 static ssize_t show_fbdev(struct device *dev, struct device_attribute *attr, char *buf)
1206 {
1207 ssize_t ret = 0;
1208 int i;
1209
1210 for (i = 0; i < num_registered_fb; i++)
1211 ret += snprintf(buf+ret, PAGE_SIZE-ret, "fb[%d] xres=%d, yres=%d, addr=0x%lx\n", i, registered_fb[i]->var.xres, registered_fb[i]->var.yres, registered_fb[i]->fix.smem_start);
1212
1213 if (ret < PAGE_SIZE - 1) {
1214 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1215 } else {
1216 buf[PAGE_SIZE-2] = '\n';
1217 buf[PAGE_SIZE-1] = '\0';
1218 ret = PAGE_SIZE-1;
1219 }
1220
1221 return ret;
1222 }
1223 #endif
1224
1225 static int gpu_get_status(struct exynos_context *platform, char *buf, size_t buf_size)
1226 {
1227 int cnt = 0;
1228 int i;
1229 int mmu_fault_cnt = 0;
1230
1231 if (!platform)
1232 return -ENODEV;
1233
1234 if (buf == NULL)
1235 return 0;
1236
1237 for (i = GPU_MMU_TRANSLATION_FAULT; i <= GPU_MMU_MEMORY_ATTRIBUTES_FAULT; i++)
1238 mmu_fault_cnt += platform->gpu_exception_count[i];
1239
1240 cnt += snprintf(buf+cnt, buf_size-cnt, "reset count : %d\n", platform->gpu_exception_count[GPU_RESET]);
1241 cnt += snprintf(buf+cnt, buf_size-cnt, "data invalid count : %d\n", platform->gpu_exception_count[GPU_DATA_INVALIDATE_FAULT]);
1242 cnt += snprintf(buf+cnt, buf_size-cnt, "mmu fault count : %d\n", mmu_fault_cnt);
1243
1244 for (i = 0; i < BMAX_RETRY_CNT; i++)
1245 cnt += snprintf(buf+cnt, buf_size-cnt, "warmup retry count %d : %d\n", i+1, platform->balance_retry_count[i]);
1246
1247 return cnt;
1248 }
1249
1250 static ssize_t show_gpu_status(struct device *dev, struct device_attribute *attr, char *buf)
1251 {
1252 ssize_t ret = 0;
1253 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1254
1255 if (!platform)
1256 return -ENODEV;
1257
1258 ret += gpu_get_status(platform, buf+ret, (size_t)PAGE_SIZE-ret);
1259
1260 if (ret < PAGE_SIZE - 1) {
1261 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1262 } else {
1263 buf[PAGE_SIZE-2] = '\n';
1264 buf[PAGE_SIZE-1] = '\0';
1265 ret = PAGE_SIZE-1;
1266 }
1267
1268 return ret;
1269 }
1270
1271 #ifdef CONFIG_MALI_VK_BOOST
1272 static ssize_t show_vk_boost_status(struct device *dev, struct device_attribute *attr, char *buf)
1273 {
1274 ssize_t ret = 0;
1275 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1276
1277 if (!platform)
1278 return -ENODEV;
1279
1280 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->ctx_vk_need_qos);
1281
1282 if (ret < PAGE_SIZE - 1) {
1283 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1284 } else {
1285 buf[PAGE_SIZE-2] = '\n';
1286 buf[PAGE_SIZE-1] = '\0';
1287 ret = PAGE_SIZE-1;
1288 }
1289
1290 return ret;
1291 }
1292 #endif
1293
1294 #ifdef CONFIG_MALI_SUSTAINABLE_OPT
1295 static ssize_t show_sustainable_status(struct device *dev, struct device_attribute *attr, char *buf)
1296 {
1297 ssize_t ret = 0;
1298 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1299
1300 if (!platform)
1301 return -ENODEV;
1302
1303 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->sustainable.status);
1304
1305 if (ret < PAGE_SIZE - 1) {
1306 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1307 } else {
1308 buf[PAGE_SIZE-2] = '\n';
1309 buf[PAGE_SIZE-1] = '\0';
1310 ret = PAGE_SIZE-1;
1311 }
1312
1313 return ret;
1314 }
1315 #endif
1316
1317 #ifdef CONFIG_MALI_SEC_CL_BOOST
1318 static ssize_t set_cl_boost_disable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1319 {
1320 unsigned int cl_boost_disable = 0;
1321 int ret;
1322
1323 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1324
1325 if (!platform)
1326 return -ENODEV;
1327
1328 ret = kstrtoint(buf, 0, &cl_boost_disable);
1329 if (ret) {
1330 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
1331 return -ENOENT;
1332 }
1333
1334 if (cl_boost_disable == 0)
1335 platform->cl_boost_disable = false;
1336 else
1337 platform->cl_boost_disable = true;
1338
1339 return count;
1340 }
1341
1342 static ssize_t show_cl_boost_disable(struct device *dev, struct device_attribute *attr, char *buf)
1343 {
1344 ssize_t ret = 0;
1345 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1346
1347 if (!platform)
1348 return -ENODEV;
1349
1350 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->cl_boost_disable);
1351
1352 if (ret < PAGE_SIZE - 1) {
1353 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1354 } else {
1355 buf[PAGE_SIZE-2] = '\n';
1356 buf[PAGE_SIZE-1] = '\0';
1357 ret = PAGE_SIZE-1;
1358 }
1359
1360 return ret;
1361 }
1362 #endif
/** Per-device sysfs attribute definitions.
 *
 * Each DEVICE_ATTR() below creates a dev_attr_<name> node exposing the GPU
 * DVFS/debug state (operating clock, voltage, DVFS locks, trace controls,
 * framebuffer info, ...). The nodes are registered in gpu_create_sysfs_file().
 */

DEVICE_ATTR(clock, S_IRUGO|S_IWUSR, show_clock, set_clock);
DEVICE_ATTR(vol, S_IRUGO, show_vol, NULL);
DEVICE_ATTR(power_state, S_IRUGO, show_power_state, NULL);
DEVICE_ATTR(asv_table, S_IRUGO, show_asv_table, NULL);
DEVICE_ATTR(dvfs_table, S_IRUGO, show_dvfs_table, NULL);
DEVICE_ATTR(time_in_state, S_IRUGO|S_IWUSR, show_time_in_state, set_time_in_state);
DEVICE_ATTR(utilization, S_IRUGO, show_utilization, NULL);
DEVICE_ATTR(perf, S_IRUGO, show_perf, NULL);
#ifdef CONFIG_MALI_DVFS
/* DVFS tuning knobs: governor selection, min/max locks, highspeed thresholds. */
DEVICE_ATTR(dvfs, S_IRUGO|S_IWUSR, show_dvfs, set_dvfs);
DEVICE_ATTR(dvfs_governor, S_IRUGO|S_IWUSR, show_governor, set_governor);
DEVICE_ATTR(dvfs_max_lock_status, S_IRUGO, show_max_lock_status, NULL);
DEVICE_ATTR(dvfs_min_lock_status, S_IRUGO, show_min_lock_status, NULL);
DEVICE_ATTR(dvfs_max_lock, S_IRUGO|S_IWUSR, show_max_lock_dvfs, set_max_lock_dvfs);
DEVICE_ATTR(dvfs_min_lock, S_IRUGO|S_IWUSR, show_min_lock_dvfs, set_min_lock_dvfs);
DEVICE_ATTR(down_staycount, S_IRUGO|S_IWUSR, show_down_staycount, set_down_staycount);
DEVICE_ATTR(highspeed_clock, S_IRUGO|S_IWUSR, show_highspeed_clock, set_highspeed_clock);
DEVICE_ATTR(highspeed_load, S_IRUGO|S_IWUSR, show_highspeed_load, set_highspeed_load);
DEVICE_ATTR(highspeed_delay, S_IRUGO|S_IWUSR, show_highspeed_delay, set_highspeed_delay);
DEVICE_ATTR(wakeup_lock, S_IRUGO|S_IWUSR, show_wakeup_lock, set_wakeup_lock);
DEVICE_ATTR(polling_speed, S_IRUGO|S_IWUSR, show_polling_speed, set_polling_speed);
DEVICE_ATTR(tmu, S_IRUGO|S_IWUSR, show_tmu, set_tmu_control);
#ifdef CONFIG_CPU_THERMAL_IPA
DEVICE_ATTR(norm_utilization, S_IRUGO, show_norm_utilization, NULL);
DEVICE_ATTR(utilization_stats, S_IRUGO, show_utilization_stats, NULL);
#endif /* CONFIG_CPU_THERMAL_IPA */
#endif /* CONFIG_MALI_DVFS */
DEVICE_ATTR(debug_level, S_IRUGO|S_IWUSR, show_debug_level, set_debug_level);
#ifdef CONFIG_MALI_EXYNOS_TRACE
DEVICE_ATTR(trace_level, S_IRUGO|S_IWUSR, show_trace_level, set_trace_level);
DEVICE_ATTR(trace_dump, S_IRUGO|S_IWUSR, show_trace_dump, init_trace_dump);
#endif /* CONFIG_MALI_EXYNOS_TRACE */
#ifdef DEBUG_FBDEV
DEVICE_ATTR(fbdev, S_IRUGO, show_fbdev, NULL);
#endif
DEVICE_ATTR(gpu_status, S_IRUGO, show_gpu_status, NULL);
#ifdef CONFIG_MALI_VK_BOOST
DEVICE_ATTR(vk_boost_status, S_IRUGO, show_vk_boost_status, NULL);
#endif
#ifdef CONFIG_MALI_SUSTAINABLE_OPT
DEVICE_ATTR(sustainable_status, S_IRUGO, show_sustainable_status, NULL);
#endif
#ifdef CONFIG_MALI_SEC_CL_BOOST
DEVICE_ATTR(cl_boost_disable, S_IRUGO|S_IWUSR, show_cl_boost_disable, set_cl_boost_disable);
#endif
1413
1414 #ifdef CONFIG_MALI_DEBUG_KERNEL_SYSFS
1415 #ifdef CONFIG_MALI_DVFS
1416 #define BUF_SIZE 1000
1417 static ssize_t show_kernel_sysfs_gpu_info(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1418 {
1419 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1420 ssize_t ret = 0;
1421
1422 if (!platform)
1423 return -ENODEV;
1424
1425 if (buf == NULL)
1426 return 0;
1427
1428 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"SSTOP\":\"%d\",", platform->gpu_exception_count[GPU_SOFT_STOP]);
1429 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"HSTOP\":\"%d\",", platform->gpu_exception_count[GPU_HARD_STOP]);
1430 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"RESET\":\"%d\",", platform->gpu_exception_count[GPU_RESET]);
1431 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"DIFLT\":\"%d\",", platform->gpu_exception_count[GPU_DATA_INVALIDATE_FAULT]);
1432 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"TRFLT\":\"%d\",", platform->gpu_exception_count[GPU_MMU_TRANSLATION_FAULT]);
1433 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"PMFLT\":\"%d\",", platform->gpu_exception_count[GPU_MMU_PERMISSION_FAULT]);
1434 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"BFLT\":\"%d\",", platform->gpu_exception_count[GPU_MMU_TRANSTAB_BUS_FAULT]);
1435 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"ACCFG\":\"%d\",", platform->gpu_exception_count[GPU_MMU_ACCESS_FLAG_FAULT]);
1436 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"ASFLT\":\"%d\",", platform->gpu_exception_count[GPU_MMU_ADDRESS_SIZE_FAULT]);
1437 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"ATFLT\":\"%d\",", platform->gpu_exception_count[GPU_MMU_MEMORY_ATTRIBUTES_FAULT]);
1438 ret += snprintf(buf+ret, BUF_SIZE-ret, "\"UNKN\":\"%d\"", platform->gpu_exception_count[GPU_UNKNOWN]);
1439
1440 if (ret < PAGE_SIZE - 1) {
1441 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1442 } else {
1443 buf[PAGE_SIZE-2] = '\n';
1444 buf[PAGE_SIZE-1] = '\0';
1445 ret = PAGE_SIZE-1;
1446 }
1447
1448 return ret;
1449 }
1450
1451 static ssize_t show_kernel_sysfs_max_lock_dvfs(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1452 {
1453 ssize_t ret = 0;
1454 unsigned long flags;
1455 int locked_clock = -1;
1456 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1457
1458 if (!platform)
1459 return -ENODEV;
1460
1461 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
1462 locked_clock = platform->max_lock;
1463 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
1464
1465 if (locked_clock > 0)
1466 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", locked_clock);
1467 else
1468 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->gpu_max_clock);
1469
1470 if (ret < PAGE_SIZE - 1) {
1471 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1472 } else {
1473 buf[PAGE_SIZE-2] = '\n';
1474 buf[PAGE_SIZE-1] = '\0';
1475 ret = PAGE_SIZE-1;
1476 }
1477
1478 return ret;
1479 }
1480
1481 static ssize_t set_kernel_sysfs_max_lock_dvfs(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
1482 {
1483 int ret, clock = 0;
1484 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1485
1486 if (!platform)
1487 return -ENODEV;
1488
1489 if (sysfs_streq("0", buf)) {
1490 platform->user_max_lock_input = 0;
1491 gpu_dvfs_clock_lock(GPU_DVFS_MAX_UNLOCK, SYSFS_LOCK, 0);
1492 } else {
1493 ret = kstrtoint(buf, 0, &clock);
1494 if (ret) {
1495 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
1496 return -ENOENT;
1497 }
1498
1499 platform->user_max_lock_input = clock;
1500
1501 clock = gpu_dvfs_get_level_clock(clock);
1502
1503 ret = gpu_dvfs_get_level(clock);
1504 if ((ret < gpu_dvfs_get_level(platform->gpu_max_clock)) || (ret > gpu_dvfs_get_level(platform->gpu_min_clock))) {
1505 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, clock);
1506 return -ENOENT;
1507 }
1508
1509 if (clock == platform->gpu_max_clock)
1510 gpu_dvfs_clock_lock(GPU_DVFS_MAX_UNLOCK, SYSFS_LOCK, 0);
1511 else
1512 gpu_dvfs_clock_lock(GPU_DVFS_MAX_LOCK, SYSFS_LOCK, clock);
1513 }
1514
1515 return count;
1516 }
1517
1518 static ssize_t show_kernel_sysfs_available_governor(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1519 {
1520 ssize_t ret = 0;
1521 gpu_dvfs_governor_info *governor_info;
1522 int i;
1523 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1524
1525 if (!platform)
1526 return -ENODEV;
1527
1528 governor_info = (gpu_dvfs_governor_info *)gpu_dvfs_get_governor_info();
1529
1530 for (i = 0; i < G3D_MAX_GOVERNOR_NUM; i++)
1531 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%s ", governor_info[i].name);
1532
1533 if (ret < PAGE_SIZE - 1) {
1534 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1535 } else {
1536 buf[PAGE_SIZE-2] = '\n';
1537 buf[PAGE_SIZE-1] = '\0';
1538 ret = PAGE_SIZE-1;
1539 }
1540
1541 return ret;
1542 }
1543
1544 static ssize_t show_kernel_sysfs_min_lock_dvfs(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1545 {
1546 ssize_t ret = 0;
1547 unsigned long flags;
1548 int locked_clock = -1;
1549 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1550
1551 if (!platform)
1552 return -ENODEV;
1553
1554 spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
1555 locked_clock = platform->min_lock;
1556 spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
1557
1558 if (locked_clock > 0)
1559 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", locked_clock);
1560 else
1561 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", platform->gpu_min_clock);
1562
1563 if (ret < PAGE_SIZE - 1) {
1564 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1565 } else {
1566 buf[PAGE_SIZE-2] = '\n';
1567 buf[PAGE_SIZE-1] = '\0';
1568 ret = PAGE_SIZE-1;
1569 }
1570
1571 return ret;
1572 }
1573
1574 static ssize_t set_kernel_sysfs_min_lock_dvfs(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
1575 {
1576 int ret, clock = 0;
1577 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1578
1579 if (!platform)
1580 return -ENODEV;
1581
1582 if (sysfs_streq("0", buf)) {
1583 platform->user_min_lock_input = 0;
1584 gpu_dvfs_clock_lock(GPU_DVFS_MIN_UNLOCK, SYSFS_LOCK, 0);
1585 } else {
1586 ret = kstrtoint(buf, 0, &clock);
1587 if (ret) {
1588 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
1589 return -ENOENT;
1590 }
1591
1592 platform->user_min_lock_input = clock;
1593
1594 clock = gpu_dvfs_get_level_clock(clock);
1595
1596 ret = gpu_dvfs_get_level(clock);
1597 if ((ret < gpu_dvfs_get_level(platform->gpu_max_clock)) || (ret > gpu_dvfs_get_level(platform->gpu_min_clock))) {
1598 GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid clock value (%d)\n", __func__, clock);
1599 return -ENOENT;
1600 }
1601
1602 if (clock > platform->gpu_max_clock_limit)
1603 clock = platform->gpu_max_clock_limit;
1604
1605 if (clock == platform->gpu_min_clock)
1606 gpu_dvfs_clock_lock(GPU_DVFS_MIN_UNLOCK, SYSFS_LOCK, 0);
1607 else
1608 gpu_dvfs_clock_lock(GPU_DVFS_MIN_LOCK, SYSFS_LOCK, clock);
1609 }
1610
1611 return count;
1612 }
1613 #endif /* #ifdef CONFIG_MALI_DVFS */
1614
1615 static ssize_t show_kernel_sysfs_utilization(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1616 {
1617 ssize_t ret = 0;
1618 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1619
1620 if (!platform)
1621 return -ENODEV;
1622
1623 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%3d%%", platform->env_data.utilization);
1624
1625 if (ret < PAGE_SIZE - 1) {
1626 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1627 } else {
1628 buf[PAGE_SIZE-2] = '\n';
1629 buf[PAGE_SIZE-1] = '\0';
1630 ret = PAGE_SIZE-1;
1631 }
1632
1633 return ret;
1634 }
1635
/* Kernel-group sysfs show for "gpu_clock": the current GPU clock, or 0 when
 * the GPU is powered off or in DVS mode. The power check and clock read are
 * done under the appropriate lock for the active power-management scheme. */
static ssize_t show_kernel_sysfs_clock(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	int clock = 0;	/* stays 0 unless the GPU is powered and not in DVS */
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

#ifdef CONFIG_MALI_RT_PM
	/* Runtime-PM build: serialize against the power domain so the GPU
	 * cannot power down between the power check and the register read. */
	if (platform->exynos_pm_domain) {
		mutex_lock(&platform->exynos_pm_domain->access_lock);
		if (!platform->dvs_is_enabled && gpu_is_power_on())
			clock = gpu_get_cur_clock(platform);
		mutex_unlock(&platform->exynos_pm_domain->access_lock);
	}
#else
	/* Non-RT-PM build: guard the clock read with the driver's clock mutex. */
	if (gpu_control_is_power_on(pkbdev) == 1) {
		mutex_lock(&platform->gpu_clock_lock);
		if (!platform->dvs_is_enabled)
			clock = gpu_get_cur_clock(platform);
		mutex_unlock(&platform->gpu_clock_lock);
	}
#endif

	ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", clock);

	if (ret < PAGE_SIZE - 1) {
		ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
	} else {
		buf[PAGE_SIZE-2] = '\n';
		buf[PAGE_SIZE-1] = '\0';
		ret = PAGE_SIZE-1;
	}

	return ret;
}
1673
1674 static ssize_t show_kernel_sysfs_freq_table(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1675 {
1676 ssize_t ret = 0;
1677 int i = 0;
1678 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1679
1680 if (!platform)
1681 return -ENODEV;
1682
1683 for (i = gpu_dvfs_get_level(platform->gpu_min_clock); i >= gpu_dvfs_get_level(platform->gpu_max_clock); i--) {
1684 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d ", platform->table[i].clock);
1685 }
1686
1687 if (ret < PAGE_SIZE - 1) {
1688 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1689 } else {
1690 buf[PAGE_SIZE-2] = '\n';
1691 buf[PAGE_SIZE-1] = '\0';
1692 ret = PAGE_SIZE-1;
1693 }
1694
1695 return ret;
1696 }
1697
1698 #ifdef CONFIG_MALI_DVFS
1699 static ssize_t show_kernel_sysfs_governor(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1700 {
1701 ssize_t ret = 0;
1702 gpu_dvfs_governor_info *governor_info = NULL;
1703 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1704
1705 if (!platform)
1706 return -ENODEV;
1707
1708 governor_info = (gpu_dvfs_governor_info *)gpu_dvfs_get_governor_info();
1709
1710 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%s", governor_info[platform->governor_type].name);
1711
1712 if (ret < PAGE_SIZE - 1) {
1713 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1714 } else {
1715 buf[PAGE_SIZE-2] = '\n';
1716 buf[PAGE_SIZE-1] = '\0';
1717 ret = PAGE_SIZE-1;
1718 }
1719
1720 return ret;
1721 }
1722
1723 static ssize_t set_kernel_sysfs_governor(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
1724 {
1725 int ret;
1726 int i = 0;
1727 int next_governor_type = -1;
1728 size_t governor_name_size = 0;
1729 gpu_dvfs_governor_info *governor_info = NULL;
1730 struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
1731
1732 if (!platform)
1733 return -ENODEV;
1734
1735 governor_info = (gpu_dvfs_governor_info *)gpu_dvfs_get_governor_info();
1736
1737 for (i = 0; i < G3D_MAX_GOVERNOR_NUM; i++) {
1738 governor_name_size = strlen(governor_info[i].name);
1739 if (!strncmp(buf, governor_info[i].name, governor_name_size)) {
1740 next_governor_type = i;
1741 break;
1742 }
1743 }
1744
1745 if ((next_governor_type < 0) || (next_governor_type >= G3D_MAX_GOVERNOR_NUM)) {
1746 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
1747 return -ENOENT;
1748 }
1749
1750 ret = gpu_dvfs_governor_change(next_governor_type);
1751
1752 if (ret < 0) {
1753 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u,
1754 "%s: fail to set the new governor (%d)\n", __func__, next_governor_type);
1755 return -ENOENT;
1756 }
1757
1758 return count;
1759 }
1760 #endif /* #ifdef CONFIG_MALI_DVFS */
1761
/* Kernel-group sysfs show for "gpu_model": decode the GPU_ID register into a
 * marketing name. Kept as a verbatim copy of the upstream kbase logic so it
 * can be diffed against mali_kbase_core_linux.c on driver updates. */
static ssize_t show_kernel_sysfs_gpu_model(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	/* COPY from mali_kbase_core_linux.c : 2594 line, last updated: 20161017, r2p0-03rel0 */
	static const struct gpu_product_id_name {
		unsigned id;
		char *name;
	} gpu_product_id_names[] = {
		{ .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
		{ .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
		{ .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
		{ .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
		{ .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
		{ .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
		{ .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
		{ .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
		{ .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
		  .name = "Mali-G71" },
		{ .id = GPU_ID2_PRODUCT_THEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
		  .name = "Mali-THEx" },
	};
	const char *product_name = "(Unknown Mali GPU)";
	struct kbase_device *kbdev;
	u32 gpu_id;
	unsigned product_id, product_id_mask;
	unsigned i;
	bool is_new_format;

	kbdev = pkbdev;
	if (!kbdev)
		return -ENODEV;

	/* Old- and new-format IDs use different product-id mask widths. */
	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
	product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
	is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
	product_id_mask =
		(is_new_format ?
			GPU_ID2_PRODUCT_MODEL :
			GPU_ID_VERSION_PRODUCT_ID) >>
		GPU_ID_VERSION_PRODUCT_ID_SHIFT;

	/* Match only within the same ID format, under the format's mask. */
	for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
		const struct gpu_product_id_name *p = &gpu_product_id_names[i];

		if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
		    (p->id & product_id_mask) ==
		    (product_id & product_id_mask)) {
			product_name = p->name;
			break;
		}
	}

	return scnprintf(buf, PAGE_SIZE, "%s\n", product_name);
}
1815
1816 #if defined(CONFIG_MALI_DVFS) && defined(CONFIG_EXYNOS_THERMAL) && defined(CONFIG_GPU_THERMAL)
1817
1818 extern struct exynos_tmu_data *gpu_thermal_data;
1819
1820 static ssize_t show_kernel_sysfs_gpu_temp(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
1821 {
1822 ssize_t ret = 0;
1823 int gpu_temp = 0;
1824 int gpu_temp_int = 0;
1825 int gpu_temp_point = 0;
1826
1827
1828 if (!gpu_thermal_data) {
1829 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "[Kernel group SYSFS] thermal driver does not ready\n");
1830 return -ENODEV;
1831 }
1832
1833 mutex_lock(&gpu_thermal_data->lock);
1834
1835 if (gpu_thermal_data->num_of_sensors)
1836 gpu_temp = gpu_thermal_data->tmu_read(gpu_thermal_data) * MCELSIUS;
1837
1838 mutex_unlock(&gpu_thermal_data->lock);
1839
1840 gpu_temp_int = gpu_temp / 1000;
1841 gpu_temp_point = gpu_temp % gpu_temp_int;
1842 ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d.%d", gpu_temp_int, gpu_temp_point);
1843
1844 if (ret < PAGE_SIZE - 1) {
1845 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
1846 } else {
1847 buf[PAGE_SIZE-2] = '\n';
1848 buf[PAGE_SIZE-1] = '\0';
1849 ret = PAGE_SIZE-1;
1850 }
1851
1852 return ret;
1853 }
1854
/* Kernel-group (kobject) sysfs attributes, registered under 'external_kobj'. */
static struct kobj_attribute gpu_temp_attribute =
	__ATTR(gpu_tmu, S_IRUGO, show_kernel_sysfs_gpu_temp, NULL);
#endif

#ifdef CONFIG_MALI_DVFS
static struct kobj_attribute gpu_info_attribute =
	__ATTR(gpu_info, S_IRUGO, show_kernel_sysfs_gpu_info, NULL);

static struct kobj_attribute gpu_max_lock_attribute =
	__ATTR(gpu_max_clock, S_IRUGO|S_IWUSR, show_kernel_sysfs_max_lock_dvfs, set_kernel_sysfs_max_lock_dvfs);

static struct kobj_attribute gpu_min_lock_attribute =
	__ATTR(gpu_min_clock, S_IRUGO|S_IWUSR, show_kernel_sysfs_min_lock_dvfs, set_kernel_sysfs_min_lock_dvfs);
#endif /* #ifdef CONFIG_MALI_DVFS */

static struct kobj_attribute gpu_busy_attribute =
	__ATTR(gpu_busy, S_IRUGO, show_kernel_sysfs_utilization, NULL);

static struct kobj_attribute gpu_clock_attribute =
	__ATTR(gpu_clock, S_IRUGO, show_kernel_sysfs_clock, NULL);

static struct kobj_attribute gpu_freq_table_attribute =
	__ATTR(gpu_freq_table, S_IRUGO, show_kernel_sysfs_freq_table, NULL);

#ifdef CONFIG_MALI_DVFS
static struct kobj_attribute gpu_governor_attribute =
	__ATTR(gpu_governor, S_IRUGO|S_IWUSR, show_kernel_sysfs_governor, set_kernel_sysfs_governor);

static struct kobj_attribute gpu_available_governor_attribute =
	__ATTR(gpu_available_governor, S_IRUGO, show_kernel_sysfs_available_governor, NULL);
#endif /* #ifdef CONFIG_MALI_DVFS */

static struct kobj_attribute gpu_model_attribute =
	__ATTR(gpu_model, S_IRUGO, show_kernel_sysfs_gpu_model, NULL);


/* Attribute list for the kernel-group kobject; NULL-terminated. */
static struct attribute *attrs[] = {
#ifdef CONFIG_MALI_DVFS
#if defined(CONFIG_EXYNOS_THERMAL) && defined(CONFIG_GPU_THERMAL)
	&gpu_temp_attribute.attr,
#endif
	&gpu_info_attribute.attr,
	&gpu_max_lock_attribute.attr,
	&gpu_min_lock_attribute.attr,
#endif /* #ifdef CONFIG_MALI_DVFS */
	&gpu_busy_attribute.attr,
	&gpu_clock_attribute.attr,
	&gpu_freq_table_attribute.attr,
#ifdef CONFIG_MALI_DVFS
	&gpu_governor_attribute.attr,
	&gpu_available_governor_attribute.attr,
#endif /* #ifdef CONFIG_MALI_DVFS */
	&gpu_model_attribute.attr,
	NULL,
};

static struct attribute_group attr_group = {
	.attrs = attrs,
};
/* kobject under which the attribute group above is created. */
static struct kobject *external_kobj;
#endif
1916
1917 int gpu_create_sysfs_file(struct device *dev)
1918 {
1919 #ifdef CONFIG_MALI_DEBUG_KERNEL_SYSFS
1920 int retval = 0;
1921 #endif
1922
1923 if (device_create_file(dev, &dev_attr_clock)) {
1924 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [clock]\n");
1925 goto out;
1926 }
1927
1928 if (device_create_file(dev, &dev_attr_vol)) {
1929 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [vol]\n");
1930 goto out;
1931 }
1932
1933 if (device_create_file(dev, &dev_attr_power_state)) {
1934 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [power_state]\n");
1935 goto out;
1936 }
1937
1938 if (device_create_file(dev, &dev_attr_asv_table)) {
1939 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [asv_table]\n");
1940 goto out;
1941 }
1942
1943 if (device_create_file(dev, &dev_attr_dvfs_table)) {
1944 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_table]\n");
1945 goto out;
1946 }
1947
1948 if (device_create_file(dev, &dev_attr_time_in_state)) {
1949 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [time_in_state]\n");
1950 goto out;
1951 }
1952
1953 if (device_create_file(dev, &dev_attr_utilization)) {
1954 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [utilization]\n");
1955 goto out;
1956 }
1957
1958 if (device_create_file(dev, &dev_attr_perf)) {
1959 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [perf]\n");
1960 goto out;
1961 }
1962 #ifdef CONFIG_MALI_DVFS
1963 if (device_create_file(dev, &dev_attr_dvfs)) {
1964 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs]\n");
1965 goto out;
1966 }
1967
1968 if (device_create_file(dev, &dev_attr_dvfs_governor)) {
1969 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_governor]\n");
1970 goto out;
1971 }
1972
1973 if (device_create_file(dev, &dev_attr_dvfs_max_lock_status)) {
1974 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_max_lock_status]\n");
1975 goto out;
1976 }
1977
1978 if (device_create_file(dev, &dev_attr_dvfs_min_lock_status)) {
1979 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_min_lock_status]\n");
1980 goto out;
1981 }
1982
1983 if (device_create_file(dev, &dev_attr_dvfs_max_lock)) {
1984 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_max_lock]\n");
1985 goto out;
1986 }
1987
1988 if (device_create_file(dev, &dev_attr_dvfs_min_lock)) {
1989 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [dvfs_min_lock]\n");
1990 goto out;
1991 }
1992
1993 if (device_create_file(dev, &dev_attr_down_staycount)) {
1994 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [down_staycount]\n");
1995 goto out;
1996 }
1997
1998 if (device_create_file(dev, &dev_attr_highspeed_clock)) {
1999 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [highspeed_clock]\n");
2000 goto out;
2001 }
2002
2003 if (device_create_file(dev, &dev_attr_highspeed_load)) {
2004 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [highspeed_load]\n");
2005 goto out;
2006 }
2007
2008 if (device_create_file(dev, &dev_attr_highspeed_delay)) {
2009 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [highspeed_delay]\n");
2010 goto out;
2011 }
2012
2013 if (device_create_file(dev, &dev_attr_wakeup_lock)) {
2014 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [wakeup_lock]\n");
2015 goto out;
2016 }
2017
2018 if (device_create_file(dev, &dev_attr_polling_speed)) {
2019 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [polling_speed]\n");
2020 goto out;
2021 }
2022
2023 if (device_create_file(dev, &dev_attr_tmu)) {
2024 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [tmu]\n");
2025 goto out;
2026 }
2027 #ifdef CONFIG_CPU_THERMAL_IPA
2028 if (device_create_file(dev, &dev_attr_norm_utilization)) {
2029 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [norm_utilization]\n");
2030 goto out;
2031 }
2032
2033 if (device_create_file(dev, &dev_attr_utilization_stats)) {
2034 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [utilization_stats]\n");
2035 goto out;
2036 }
2037 #endif /* CONFIG_CPU_THERMAL_IPA */
2038 #endif /* CONFIG_MALI_DVFS */
2039 if (device_create_file(dev, &dev_attr_debug_level)) {
2040 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [debug_level]\n");
2041 goto out;
2042 }
2043 #ifdef CONFIG_MALI_EXYNOS_TRACE
2044 if (device_create_file(dev, &dev_attr_trace_level)) {
2045 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [trace_level]\n");
2046 goto out;
2047 }
2048
2049 if (device_create_file(dev, &dev_attr_trace_dump)) {
2050 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [trace_dump]\n");
2051 goto out;
2052 }
2053 #endif /* CONFIG_MALI_EXYNOS_TRACE */
2054 #ifdef DEBUG_FBDEV
2055 if (device_create_file(dev, &dev_attr_fbdev)) {
2056 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [fbdev]\n");
2057 goto out;
2058 }
2059 #endif
2060
2061 if (device_create_file(dev, &dev_attr_gpu_status)) {
2062 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [gpu_status]\n");
2063 goto out;
2064 }
2065
2066 #ifdef CONFIG_MALI_VK_BOOST
2067 if (device_create_file(dev, &dev_attr_vk_boost_status)) {
2068 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [vk_boost_status]\n");
2069 goto out;
2070 }
2071 #endif
2072
2073 #ifdef CONFIG_MALI_SUSTAINABLE_OPT
2074 if (device_create_file(dev, &dev_attr_sustainable_status)) {
2075 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [sustainable_status]\n");
2076 goto out;
2077 }
2078 #endif
2079
2080 #ifdef CONFIG_MALI_SEC_CL_BOOST
2081 if (device_create_file(dev, &dev_attr_cl_boost_disable)) {
2082 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create sysfs file [cl_boost_disable]\n");
2083 goto out;
2084 }
2085 #endif
2086
2087 #ifdef CONFIG_MALI_DEBUG_KERNEL_SYSFS
2088 external_kobj = kobject_create_and_add("gpu", kernel_kobj);
2089 if (!external_kobj) {
2090 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't create Kobj for group [KERNEL - GPU]\n");
2091 goto out;
2092 }
2093
2094 retval = sysfs_create_group(external_kobj, &attr_group);
2095 if (retval) {
2096 kobject_put(external_kobj);
2097 GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "couldn't add sysfs group [KERNEL - GPU]\n");
2098 goto out;
2099 }
2100 #endif
2101
2102 return 0;
2103 out:
2104 return -ENOENT;
2105 }
2106
2107 void gpu_remove_sysfs_file(struct device *dev)
2108 {
2109 device_remove_file(dev, &dev_attr_clock);
2110 device_remove_file(dev, &dev_attr_vol);
2111 device_remove_file(dev, &dev_attr_power_state);
2112 device_remove_file(dev, &dev_attr_asv_table);
2113 device_remove_file(dev, &dev_attr_dvfs_table);
2114 device_remove_file(dev, &dev_attr_time_in_state);
2115 device_remove_file(dev, &dev_attr_utilization);
2116 device_remove_file(dev, &dev_attr_perf);
2117 #ifdef CONFIG_MALI_DVFS
2118 device_remove_file(dev, &dev_attr_dvfs);
2119 device_remove_file(dev, &dev_attr_dvfs_governor);
2120 device_remove_file(dev, &dev_attr_dvfs_max_lock_status);
2121 device_remove_file(dev, &dev_attr_dvfs_min_lock_status);
2122 device_remove_file(dev, &dev_attr_dvfs_max_lock);
2123 device_remove_file(dev, &dev_attr_dvfs_min_lock);
2124 device_remove_file(dev, &dev_attr_down_staycount);
2125 device_remove_file(dev, &dev_attr_highspeed_clock);
2126 device_remove_file(dev, &dev_attr_highspeed_load);
2127 device_remove_file(dev, &dev_attr_highspeed_delay);
2128 device_remove_file(dev, &dev_attr_wakeup_lock);
2129 device_remove_file(dev, &dev_attr_polling_speed);
2130 device_remove_file(dev, &dev_attr_tmu);
2131 #ifdef CONFIG_CPU_THERMAL_IPA
2132 device_remove_file(dev, &dev_attr_norm_utilization);
2133 device_remove_file(dev, &dev_attr_utilization_stats);
2134 #endif /* CONFIG_CPU_THERMAL_IPA */
2135 #endif /* CONFIG_MALI_DVFS */
2136 device_remove_file(dev, &dev_attr_debug_level);
2137 #ifdef CONFIG_MALI_EXYNOS_TRACE
2138 device_remove_file(dev, &dev_attr_trace_level);
2139 device_remove_file(dev, &dev_attr_trace_dump);
2140 #endif /* CONFIG_MALI_EXYNOS_TRACE */
2141 #ifdef DEBUG_FBDEV
2142 device_remove_file(dev, &dev_attr_fbdev);
2143 #endif
2144 device_remove_file(dev, &dev_attr_gpu_status);
2145 #ifdef CONFIG_MALI_VK_BOOST
2146 device_remove_file(dev, &dev_attr_vk_boost_status);
2147 #endif
2148 #ifdef CONFIG_MALI_SUSTAINABLE_OPT
2149 device_remove_file(dev, &dev_attr_sustainable_status);
2150 #endif
2151 #ifdef CONFIG_MALI_SEC_CL_BOOST
2152 device_remove_file(dev, &dev_attr_cl_boost_disable);
2153 #endif
2154 #ifdef CONFIG_MALI_DEBUG_KERNEL_SYSFS
2155 kobject_put(external_kobj);
2156 #endif
2157 }