/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */
/*
 * Power policy API implementations
 */
#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_pm.h>
#include <mali_kbase_config_defaults.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
static const struct kbase_pm_policy *const policy_list[] = {
#ifdef CONFIG_MALI_NO_MALI
	&kbase_pm_always_on_policy_ops,
	&kbase_pm_demand_policy_ops,
	&kbase_pm_coarse_demand_policy_ops,
#if !MALI_CUSTOMER_RELEASE
	&kbase_pm_demand_always_powered_policy_ops,
	&kbase_pm_fast_start_policy_ops,
#endif /* !MALI_CUSTOMER_RELEASE */
#else /* CONFIG_MALI_NO_MALI */
#if !PLATFORM_POWER_DOWN_ONLY
	&kbase_pm_demand_policy_ops,
#endif /* !PLATFORM_POWER_DOWN_ONLY */
	&kbase_pm_coarse_demand_policy_ops,
	&kbase_pm_always_on_policy_ops,
#if !MALI_CUSTOMER_RELEASE
#if !PLATFORM_POWER_DOWN_ONLY
	&kbase_pm_demand_always_powered_policy_ops,
	&kbase_pm_fast_start_policy_ops,
#endif /* !PLATFORM_POWER_DOWN_ONLY */
#endif /* !MALI_CUSTOMER_RELEASE */
#endif /* CONFIG_MALI_NO_MALI */
};
/* The number of policies available in the system.
 * This is derived from the number of policies listed in policy_list above.
 */
#define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list))
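
/*
 * Note: POLICY_COUNT uses the usual sizeof-array idiom, so it automatically
 * tracks whichever policies the #ifdef blocks above compile in.
 * kbase_pm_list_policies() further down hands this list and count back to
 * callers such as set_policy_by_name().
 */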
/* Function IDs for looking up Timeline Trace codes in
 * kbase_pm_change_state_trace_code */
enum kbase_pm_func_id {
	KBASE_PM_FUNC_ID_REQUEST_CORES_START,
	KBASE_PM_FUNC_ID_REQUEST_CORES_END,
	KBASE_PM_FUNC_ID_RELEASE_CORES_START,
	KBASE_PM_FUNC_ID_RELEASE_CORES_END,
	/* Note: kbase_pm_unrequest_cores() is on the slow path, and we neither
	 * expect to hit it nor tend to hit it very much anyway. We can detect
	 * whether we need more instrumentation by a difference between
	 * PM_CHECKTRANS events and PM_SEND/HANDLE_EVENT. */

	/* Must be the last */
	KBASE_PM_FUNC_ID_COUNT
};
/* State changes during request/unrequest/release-ing cores */
enum {
	KBASE_PM_CHANGE_STATE_SHADER = (1u << 0),
	KBASE_PM_CHANGE_STATE_TILER = (1u << 1),

	/* These two must be last */
	KBASE_PM_CHANGE_STATE_MASK = (KBASE_PM_CHANGE_STATE_TILER |
						KBASE_PM_CHANGE_STATE_SHADER),
	KBASE_PM_CHANGE_STATE_COUNT = KBASE_PM_CHANGE_STATE_MASK + 1
};
typedef u32 kbase_pm_change_state;
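
/*
 * A kbase_pm_change_state value is an OR-mask of the flags above, e.g.
 * (KBASE_PM_CHANGE_STATE_SHADER | KBASE_PM_CHANGE_STATE_TILER). The mask is
 * also used directly as the second index into the timeline trace-code table
 * below, which is why KBASE_PM_CHANGE_STATE_COUNT is defined as MASK + 1.
 */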
#ifdef CONFIG_MALI_TRACE_TIMELINE
/* Timeline Trace code lookups for each function */
static u32 kbase_pm_change_state_trace_code[KBASE_PM_FUNC_ID_COUNT]
					[KBASE_PM_CHANGE_STATE_COUNT] = {
	/* kbase_pm_request_cores */
	[KBASE_PM_FUNC_ID_REQUEST_CORES_START][0] = 0,
	[KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] =
		SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_START,
	[KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_START,
	[KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER |
						KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_START,

	[KBASE_PM_FUNC_ID_REQUEST_CORES_END][0] = 0,
	[KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_SHADER] =
		SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_END,
	[KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_END,
	[KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_SHADER |
						KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_END,

	/* kbase_pm_release_cores */
	[KBASE_PM_FUNC_ID_RELEASE_CORES_START][0] = 0,
	[KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] =
		SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_START,
	[KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_START,
	[KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_SHADER |
						KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_START,

	[KBASE_PM_FUNC_ID_RELEASE_CORES_END][0] = 0,
	[KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_SHADER] =
		SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_END,
	[KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_END,
	[KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_SHADER |
						KBASE_PM_CHANGE_STATE_TILER] =
		SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_END,
};
static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
		enum kbase_pm_func_id func_id,
		kbase_pm_change_state state)
{
	int trace_code;

	KBASE_DEBUG_ASSERT(func_id >= 0 && func_id < KBASE_PM_FUNC_ID_COUNT);
	KBASE_DEBUG_ASSERT(state != 0 && (state & KBASE_PM_CHANGE_STATE_MASK) ==
									state);

	trace_code = kbase_pm_change_state_trace_code[func_id][state];
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code);
}
#else /* CONFIG_MALI_TRACE_TIMELINE */
static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
		enum kbase_pm_func_id func_id, kbase_pm_change_state state)
{
}

#endif /* CONFIG_MALI_TRACE_TIMELINE */
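
/*
 * When CONFIG_MALI_TRACE_TIMELINE is disabled, the empty static inline stub
 * above lets the request/release paths call kbase_timeline_pm_cores_func()
 * unconditionally; the compiler simply discards the call.
 */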
/**
 * kbasep_pm_do_poweroff_cores - Process a poweroff request and power down any
 *                               requested shader cores
 * @kbdev: Device pointer
 */
static void kbasep_pm_do_poweroff_cores(struct kbase_device *kbdev)
{
	u64 prev_shader_state = kbdev->pm.backend.desired_shader_state;
	u64 prev_tiler_state = kbdev->pm.backend.desired_tiler_state;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	kbdev->pm.backend.desired_shader_state &=
			~kbdev->pm.backend.shader_poweroff_pending;
	kbdev->pm.backend.desired_tiler_state &=
			~kbdev->pm.backend.tiler_poweroff_pending;

	kbdev->pm.backend.shader_poweroff_pending = 0;
	kbdev->pm.backend.tiler_poweroff_pending = 0;

	if (prev_shader_state != kbdev->pm.backend.desired_shader_state ||
			prev_tiler_state !=
				kbdev->pm.backend.desired_tiler_state ||
			kbdev->pm.backend.ca_in_transition) {
		bool cores_are_available;

		KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START);
		cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
		KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
			SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END);

		/* Don't need 'cores_are_available',
		 * because we don't return anything */
		CSTD_UNUSED(cores_are_available);
	}
}
static enum hrtimer_restart
kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *timer)
{
	struct kbase_device *kbdev;
	unsigned long flags;

	kbdev = container_of(timer, struct kbase_device,
						pm.backend.gpu_poweroff_timer);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	/* It is safe for this call to do nothing if the work item is already
	 * queued. The worker function will read the most up-to-date state of
	 * kbdev->pm.backend.gpu_poweroff_pending under lock.
	 *
	 * If a state change occurs while the worker function is processing,
	 * this call will succeed as a work item can be requeued once it has
	 * started processing.
	 */
	if (kbdev->pm.backend.gpu_poweroff_pending)
		queue_work(kbdev->pm.backend.gpu_poweroff_wq,
					&kbdev->pm.backend.gpu_poweroff_work);

	if (kbdev->pm.backend.shader_poweroff_pending ||
			kbdev->pm.backend.tiler_poweroff_pending) {
		kbdev->pm.backend.shader_poweroff_pending_time--;

		KBASE_DEBUG_ASSERT(
			kbdev->pm.backend.shader_poweroff_pending_time >= 0);

		if (!kbdev->pm.backend.shader_poweroff_pending_time)
			kbasep_pm_do_poweroff_cores(kbdev);
	}

	if (kbdev->pm.backend.poweroff_timer_needed) {
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

		hrtimer_add_expires(timer, kbdev->pm.gpu_poweroff_time);

		return HRTIMER_RESTART;
	}

	kbdev->pm.backend.poweroff_timer_running = false;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return HRTIMER_NORESTART;
}
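
/*
 * The callback above re-arms itself with hrtimer_add_expires(), i.e. the next
 * expiry is computed from the previous expiry rather than from "now", so the
 * poweroff tick keeps its nominal period even if the callback runs late.
 */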
static void kbasep_pm_do_gpu_poweroff_wq(struct work_struct *data)
{
	unsigned long flags;
	struct kbase_device *kbdev;
	bool do_poweroff = false;

	kbdev = container_of(data, struct kbase_device,
						pm.backend.gpu_poweroff_work);

	mutex_lock(&kbdev->pm.lock);

	if (kbdev->pm.backend.gpu_poweroff_pending == 0) {
		mutex_unlock(&kbdev->pm.lock);
		return;
	}

	kbdev->pm.backend.gpu_poweroff_pending--;

	if (kbdev->pm.backend.gpu_poweroff_pending > 0) {
		mutex_unlock(&kbdev->pm.lock);
		return;
	}

	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_poweroff_pending == 0);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	/* Only power off the GPU if a request is still pending */
	if (!kbdev->pm.backend.pm_current_policy->get_core_active(kbdev))
		do_poweroff = true;

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	if (do_poweroff) {
		kbdev->pm.backend.poweroff_timer_needed = false;
		hrtimer_cancel(&kbdev->pm.backend.gpu_poweroff_timer);
		kbdev->pm.backend.poweroff_timer_running = false;

		/* Power off the GPU */
		kbase_pm_do_poweroff(kbdev, false);
	}

	mutex_unlock(&kbdev->pm.lock);
}
int kbase_pm_policy_init(struct kbase_device *kbdev)
{
	struct workqueue_struct *wq;

	wq = alloc_workqueue("kbase_pm_do_poweroff",
			WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!wq)
		return -ENOMEM;

	kbdev->pm.backend.gpu_poweroff_wq = wq;
	INIT_WORK(&kbdev->pm.backend.gpu_poweroff_work,
			kbasep_pm_do_gpu_poweroff_wq);
	hrtimer_init(&kbdev->pm.backend.gpu_poweroff_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	kbdev->pm.backend.gpu_poweroff_timer.function =
			kbasep_pm_do_gpu_poweroff_callback;
	kbdev->pm.backend.pm_current_policy = policy_list[0];
	kbdev->pm.backend.pm_current_policy->init(kbdev);
	kbdev->pm.gpu_poweroff_time =
			HR_TIMER_DELAY_NSEC(DEFAULT_PM_GPU_POWEROFF_TICK_NS);
	kbdev->pm.poweroff_shader_ticks = DEFAULT_PM_POWEROFF_TICK_SHADER;
	kbdev->pm.poweroff_gpu_ticks = DEFAULT_PM_POWEROFF_TICK_GPU;

	return 0;
}
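
/*
 * Illustrative only (not part of this file): a backend init path would
 * typically pair the calls above and below roughly as follows, assuming the
 * usual error-unwinding style:
 *
 *	err = kbase_pm_policy_init(kbdev);
 *	if (err)
 *		return err;
 *	...
 *	kbase_pm_policy_term(kbdev);
 */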
void kbase_pm_policy_term(struct kbase_device *kbdev)
{
	kbdev->pm.backend.pm_current_policy->term(kbdev);
	destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wq);
}
void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev)
{
	unsigned long flags;

	lockdep_assert_held(&kbdev->pm.lock);

	kbdev->pm.backend.poweroff_timer_needed = false;
	hrtimer_cancel(&kbdev->pm.backend.gpu_poweroff_timer);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbdev->pm.backend.poweroff_timer_running = false;

	/* If wq is already running but is held off by pm.lock, make sure it has
	 * no effect */
	kbdev->pm.backend.gpu_poweroff_pending = 0;

	kbdev->pm.backend.shader_poweroff_pending = 0;
	kbdev->pm.backend.tiler_poweroff_pending = 0;
	kbdev->pm.backend.shader_poweroff_pending_time = 0;

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
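
/*
 * kbase_pm_update_active() below asks the current policy whether the GPU
 * should be active (get_core_active()) and then either powers the GPU on, or
 * schedules/performs a power-off depending on the poweroff tick settings.
 * Called with pm.lock held.
 */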
void kbase_pm_update_active(struct kbase_device *kbdev)
{
	struct kbase_pm_device_data *pm = &kbdev->pm;
	struct kbase_pm_backend_data *backend = &pm->backend;
	unsigned long flags;
	bool active;

	lockdep_assert_held(&pm->lock);

	/* pm_current_policy will never be NULL while pm.lock is held */
	KBASE_DEBUG_ASSERT(backend->pm_current_policy);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	active = backend->pm_current_policy->get_core_active(kbdev);

	if (active) {
		if (backend->gpu_poweroff_pending) {
			/* Cancel any pending power off request */
			backend->gpu_poweroff_pending = 0;

			/* If a request was pending then the GPU was still
			 * powered, so no need to continue */
			if (!kbdev->poweroff_pending) {
				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
						flags);
				return;
			}
		}

		if (!backend->poweroff_timer_running && !backend->gpu_powered &&
				(pm->poweroff_gpu_ticks ||
				pm->poweroff_shader_ticks)) {
			backend->poweroff_timer_needed = true;
			backend->poweroff_timer_running = true;
			hrtimer_start(&backend->gpu_poweroff_timer,
					pm->gpu_poweroff_time,
					HRTIMER_MODE_REL);
		}

		/* Power on the GPU and any cores requested by the policy */
		if (pm->backend.poweroff_wait_in_progress) {
			pm->backend.poweron_required = true;
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		} else {
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
			kbase_pm_do_poweron(kbdev, false);
		}
	} else {
		/* It is an error for the power policy to power off the GPU
		 * when there are contexts active */
		KBASE_DEBUG_ASSERT(pm->active_count == 0);

		if (backend->shader_poweroff_pending ||
				backend->tiler_poweroff_pending) {
			backend->shader_poweroff_pending = 0;
			backend->tiler_poweroff_pending = 0;
			backend->shader_poweroff_pending_time = 0;
		}

		/* Request power off */
		if (pm->backend.gpu_powered) {
			if (pm->poweroff_gpu_ticks) {
				backend->gpu_poweroff_pending =
						pm->poweroff_gpu_ticks;
				backend->poweroff_timer_needed = true;
				if (!backend->poweroff_timer_running) {
					/* Start timer if not running (eg if
					 * power policy has been changed from
					 * always_on to something else). This
					 * will ensure the GPU is actually
					 * powered off */
					backend->poweroff_timer_running = true;
					hrtimer_start(
						&backend->gpu_poweroff_timer,
						pm->gpu_poweroff_time,
						HRTIMER_MODE_REL);
				}
				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
						flags);
			} else {
				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
						flags);

				/* Power off the GPU immediately */
				kbase_pm_do_poweroff(kbdev, false);
			}
		} else {
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		}
	}
}
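
/*
 * kbase_pm_update_cores_state_nolock() below recomputes the desired shader
 * and tiler bitmaps from the current policy and the core-availability mask,
 * then either transitions cores immediately (when powering up) or defers the
 * power-off through the shader/tiler_poweroff_pending machinery. The caller
 * must hold hwaccess_lock.
 */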
void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
{
	u64 desired_bitmap;
	u64 desired_tiler_bitmap;
	bool cores_are_available;
	bool do_poweroff = false;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (kbdev->pm.backend.pm_current_policy == NULL)
		return;
	if (kbdev->pm.backend.poweroff_wait_in_progress)
		return;

	if (kbdev->protected_mode_transition && !kbdev->shader_needed_bitmap &&
			!kbdev->shader_inuse_bitmap && !kbdev->tiler_needed_cnt
			&& !kbdev->tiler_inuse_cnt) {
		/* We are trying to change in/out of protected mode - force all
		 * cores off so that the L2 powers down */
		desired_bitmap = 0;
		desired_tiler_bitmap = 0;
	} else {
		desired_bitmap =
		    kbdev->pm.backend.pm_current_policy->get_core_mask(kbdev);
		desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev);

		if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0)
			desired_tiler_bitmap = 1;
		else
			desired_tiler_bitmap = 0;

		if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY)) {
			/* Unless XAFFINITY is supported, enable core 0 if tiler
			 * required, regardless of core availability */
			if (kbdev->tiler_needed_cnt > 0 ||
					kbdev->tiler_inuse_cnt > 0)
				desired_bitmap |= 1;
		}
	}

	if (kbdev->pm.backend.desired_shader_state != desired_bitmap)
		KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL, NULL, 0u,
				(u32)desired_bitmap);
	/* Are any cores being powered on? */
	if (~kbdev->pm.backend.desired_shader_state & desired_bitmap ||
	    ~kbdev->pm.backend.desired_tiler_state & desired_tiler_bitmap ||
	    kbdev->pm.backend.ca_in_transition) {
		/* Check if we are powering off any cores before updating shader
		 * state */
		if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap ||
				kbdev->pm.backend.desired_tiler_state &
				~desired_tiler_bitmap) {
			/* Start timer to power off cores */
			kbdev->pm.backend.shader_poweroff_pending |=
				(kbdev->pm.backend.desired_shader_state &
						~desired_bitmap);
			kbdev->pm.backend.tiler_poweroff_pending |=
				(kbdev->pm.backend.desired_tiler_state &
						~desired_tiler_bitmap);

			if (kbdev->pm.poweroff_shader_ticks &&
					!kbdev->protected_mode_transition)
				kbdev->pm.backend.shader_poweroff_pending_time =
						kbdev->pm.poweroff_shader_ticks;
			else
				do_poweroff = true;
		}

		kbdev->pm.backend.desired_shader_state = desired_bitmap;
		kbdev->pm.backend.desired_tiler_state = desired_tiler_bitmap;

		/* If any cores are being powered on, transition immediately */
		cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	} else if (kbdev->pm.backend.desired_shader_state & ~desired_bitmap ||
			kbdev->pm.backend.desired_tiler_state &
			~desired_tiler_bitmap) {
		/* Start timer to power off cores */
		kbdev->pm.backend.shader_poweroff_pending |=
				(kbdev->pm.backend.desired_shader_state &
						~desired_bitmap);
		kbdev->pm.backend.tiler_poweroff_pending |=
				(kbdev->pm.backend.desired_tiler_state &
						~desired_tiler_bitmap);
		if (kbdev->pm.poweroff_shader_ticks &&
				!kbdev->protected_mode_transition)
			kbdev->pm.backend.shader_poweroff_pending_time =
					kbdev->pm.poweroff_shader_ticks;
		else
			kbasep_pm_do_poweroff_cores(kbdev);
	} else if (kbdev->pm.active_count == 0 && desired_bitmap != 0 &&
			desired_tiler_bitmap != 0 &&
			kbdev->pm.backend.poweroff_timer_needed) {
		/* If power policy is keeping cores on despite there being no
		 * active contexts then disable poweroff timer as it isn't
		 * required.
		 * Only reset poweroff_timer_needed if we're not in the middle
		 * of the power off callback */
		kbdev->pm.backend.poweroff_timer_needed = false;
	}

	/* Ensure timer does not power off wanted cores and make sure to power
	 * off unwanted cores */
	if (kbdev->pm.backend.shader_poweroff_pending ||
			kbdev->pm.backend.tiler_poweroff_pending) {
		kbdev->pm.backend.shader_poweroff_pending &=
				~(kbdev->pm.backend.desired_shader_state &
						desired_bitmap);
		kbdev->pm.backend.tiler_poweroff_pending &=
				~(kbdev->pm.backend.desired_tiler_state &
						desired_tiler_bitmap);

		if (!kbdev->pm.backend.shader_poweroff_pending &&
				!kbdev->pm.backend.tiler_poweroff_pending)
			kbdev->pm.backend.shader_poweroff_pending_time = 0;
	}

	/* Shader poweroff is deferred to the end of the function, to eliminate
	 * issues caused by the core availability policy recursing into this
	 * function */
	if (do_poweroff)
		kbasep_pm_do_poweroff_cores(kbdev);

	/* Don't need 'cores_are_available', because we don't return anything */
	CSTD_UNUSED(cores_are_available);
}
void kbase_pm_update_cores_state(struct kbase_device *kbdev)
{
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	kbase_pm_update_cores_state_nolock(kbdev);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
int kbase_pm_list_policies(const struct kbase_pm_policy * const **list)
{
	if (!list)
		return POLICY_COUNT;

	*list = policy_list;

	return POLICY_COUNT;
}

KBASE_EXPORT_TEST_API(kbase_pm_list_policies);
const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	return kbdev->pm.backend.pm_current_policy;
}

KBASE_EXPORT_TEST_API(kbase_pm_get_policy);
int set_policy_by_name(struct kbase_device *kbdev, const char *name)
{
	const struct kbase_pm_policy *new_policy = NULL;
	const struct kbase_pm_policy *const *policy_list;
	int policy_count;
	int i;

	policy_count = kbase_pm_list_policies(&policy_list);

	for (i = 0; i < policy_count; i++) {
		if (sysfs_streq(policy_list[i]->name, name)) {
			new_policy = policy_list[i];
			break;
		}
	}

	if (!new_policy) {
		printk("power_policy: policy not found\n");
		return -EINVAL;
	}

	trace_printk("policy name=%s\n", name);

	kbase_pm_set_policy(kbdev, new_policy);

	return 0;
}

KBASE_EXPORT_TEST_API(set_policy_by_name);
void kbase_pm_set_policy(struct kbase_device *kbdev,
				const struct kbase_pm_policy *new_policy)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	const struct kbase_pm_policy *old_policy;
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(new_policy != NULL);

	KBASE_TRACE_ADD(kbdev, PM_SET_POLICY, NULL, NULL, 0u, new_policy->id);

	/* During a policy change we pretend the GPU is active */
	/* A suspend won't happen here, because we're in a syscall from a
	 * userspace thread */
	kbase_pm_context_active(kbdev);

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	/* Remove the policy to prevent IRQ handlers from working on it */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	old_policy = kbdev->pm.backend.pm_current_policy;
	kbdev->pm.backend.pm_current_policy = NULL;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_TERM, NULL, NULL, 0u,
								old_policy->id);
	if (old_policy->term)
		old_policy->term(kbdev);

	KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_INIT, NULL, NULL, 0u,
								new_policy->id);
	if (new_policy->init)
		new_policy->init(kbdev);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbdev->pm.backend.pm_current_policy = new_policy;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	/* If any core power state changes were previously attempted, but
	 * couldn't be made because the policy was changing (current_policy was
	 * NULL), then re-try them here. */
	kbase_pm_update_active(kbdev);
	kbase_pm_update_cores_state(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	/* Now the policy change is finished, we release our fake context active
	 * reference */
	kbase_pm_context_idle(kbdev);
}

KBASE_EXPORT_TEST_API(kbase_pm_set_policy);
/* Check whether a state change has finished, and trace it as completed */
static void
kbase_pm_trace_check_and_finish_state_change(struct kbase_device *kbdev)
{
	if ((kbdev->shader_available_bitmap &
					kbdev->pm.backend.desired_shader_state)
			== kbdev->pm.backend.desired_shader_state &&
		(kbdev->tiler_available_bitmap &
					kbdev->pm.backend.desired_tiler_state)
			== kbdev->pm.backend.desired_tiler_state)
		kbase_timeline_pm_check_handle_event(kbdev,
				KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
}
void kbase_pm_request_cores(struct kbase_device *kbdev,
				bool tiler_required, u64 shader_cores)
{
	u64 cores;

	kbase_pm_change_state change_gpu_state = 0u;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	lockdep_assert_held(&kbdev->hwaccess_lock);

	cores = shader_cores;
	while (cores) {
		int bitnum = fls64(cores) - 1;
		u64 bit = 1ULL << bitnum;

		/* It should be almost impossible for this to overflow. It would
		 * require 2^32 atoms to request a particular core, which would
		 * require 2^24 contexts to submit. This would require an amount
		 * of memory that is impossible on a 32-bit system and extremely
		 * unlikely on a 64-bit system. */
		int cnt = ++kbdev->shader_needed_cnt[bitnum];

		if (1 == cnt) {
			kbdev->shader_needed_bitmap |= bit;
			change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
		}

		cores &= ~bit;
	}

	if (tiler_required) {
		int cnt = ++kbdev->tiler_needed_cnt;

		if (1 == cnt)
			change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER;

		KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt != 0);
	}

	if (change_gpu_state) {
		KBASE_TRACE_ADD(kbdev, PM_REQUEST_CHANGE_SHADER_NEEDED, NULL,
				NULL, 0u, (u32) kbdev->shader_needed_bitmap);

		kbase_timeline_pm_cores_func(kbdev,
					KBASE_PM_FUNC_ID_REQUEST_CORES_START,
							change_gpu_state);
		kbase_pm_update_cores_state_nolock(kbdev);
		kbase_timeline_pm_cores_func(kbdev,
					KBASE_PM_FUNC_ID_REQUEST_CORES_END,
							change_gpu_state);
	}
}

KBASE_EXPORT_TEST_API(kbase_pm_request_cores);
void kbase_pm_unrequest_cores(struct kbase_device *kbdev,
				bool tiler_required, u64 shader_cores)
{
	kbase_pm_change_state change_gpu_state = 0u;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	lockdep_assert_held(&kbdev->hwaccess_lock);

	while (shader_cores) {
		int bitnum = fls64(shader_cores) - 1;
		u64 bit = 1ULL << bitnum;
		int cnt;

		KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt[bitnum] > 0);

		cnt = --kbdev->shader_needed_cnt[bitnum];

		if (0 == cnt) {
			kbdev->shader_needed_bitmap &= ~bit;

			change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
		}

		shader_cores &= ~bit;
	}

	if (tiler_required) {
		int cnt;

		KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt > 0);

		cnt = --kbdev->tiler_needed_cnt;

		if (0 == cnt)
			change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER;
	}

	if (change_gpu_state) {
		KBASE_TRACE_ADD(kbdev, PM_UNREQUEST_CHANGE_SHADER_NEEDED, NULL,
				NULL, 0u, (u32) kbdev->shader_needed_bitmap);

		kbase_pm_update_cores_state_nolock(kbdev);

		/* Trace that any state change effectively completes immediately
		 * - no-one will wait on the state change */
		kbase_pm_trace_check_and_finish_state_change(kbdev);
	}
}

KBASE_EXPORT_TEST_API(kbase_pm_unrequest_cores);
enum kbase_pm_cores_ready
kbase_pm_register_inuse_cores(struct kbase_device *kbdev,
				bool tiler_required, u64 shader_cores)
{
	u64 prev_shader_needed;	/* Just for tracing */
	u64 prev_shader_inuse;	/* Just for tracing */

	lockdep_assert_held(&kbdev->hwaccess_lock);

	prev_shader_needed = kbdev->shader_needed_bitmap;
	prev_shader_inuse = kbdev->shader_inuse_bitmap;

	/* If desired_shader_state does not contain the requested cores, then
	 * power management is not attempting to power those cores (most
	 * likely due to core availability policy) and a new job affinity must
	 * be chosen */
	if ((kbdev->pm.backend.desired_shader_state & shader_cores) !=
							shader_cores) {
		return (kbdev->pm.backend.poweroff_wait_in_progress ||
				kbdev->pm.backend.pm_current_policy == NULL) ?
				KBASE_CORES_NOT_READY : KBASE_NEW_AFFINITY;
	}

	if ((kbdev->shader_available_bitmap & shader_cores) != shader_cores ||
			(tiler_required && !kbdev->tiler_available_bitmap)) {
		/* Trace ongoing core transition */
		kbase_timeline_pm_l2_transition_start(kbdev);
		return KBASE_CORES_NOT_READY;
	}

	/* If we started to trace a state change, then trace it as being
	 * finished by now, at the very latest */
	kbase_pm_trace_check_and_finish_state_change(kbdev);
	/* Trace core transition done */
	kbase_timeline_pm_l2_transition_done(kbdev);

	while (shader_cores) {
		int bitnum = fls64(shader_cores) - 1;
		u64 bit = 1ULL << bitnum;
		int cnt;

		KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt[bitnum] > 0);

		cnt = --kbdev->shader_needed_cnt[bitnum];

		if (0 == cnt)
			kbdev->shader_needed_bitmap &= ~bit;

		/* shader_inuse_cnt should not overflow because there can only
		 * be a very limited number of jobs on the h/w at one time */

		kbdev->shader_inuse_cnt[bitnum]++;
		kbdev->shader_inuse_bitmap |= bit;

		shader_cores &= ~bit;
	}

	if (tiler_required) {
		KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt > 0);

		--kbdev->tiler_needed_cnt;

		kbdev->tiler_inuse_cnt++;

		KBASE_DEBUG_ASSERT(kbdev->tiler_inuse_cnt != 0);
	}

	if (prev_shader_needed != kbdev->shader_needed_bitmap)
		KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_NEEDED, NULL,
				NULL, 0u, (u32) kbdev->shader_needed_bitmap);

	if (prev_shader_inuse != kbdev->shader_inuse_bitmap)
		KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_INUSE, NULL,
				NULL, 0u, (u32) kbdev->shader_inuse_bitmap);

	return KBASE_CORES_READY;
}

KBASE_EXPORT_TEST_API(kbase_pm_register_inuse_cores);
void kbase_pm_release_cores(struct kbase_device *kbdev,
				bool tiler_required, u64 shader_cores)
{
	kbase_pm_change_state change_gpu_state = 0u;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	lockdep_assert_held(&kbdev->hwaccess_lock);

	while (shader_cores) {
		int bitnum = fls64(shader_cores) - 1;
		u64 bit = 1ULL << bitnum;
		int cnt;

		KBASE_DEBUG_ASSERT(kbdev->shader_inuse_cnt[bitnum] > 0);

		cnt = --kbdev->shader_inuse_cnt[bitnum];

		if (0 == cnt) {
			kbdev->shader_inuse_bitmap &= ~bit;
			change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
		}

		shader_cores &= ~bit;
	}

	if (tiler_required) {
		int cnt;

		KBASE_DEBUG_ASSERT(kbdev->tiler_inuse_cnt > 0);

		cnt = --kbdev->tiler_inuse_cnt;

		if (0 == cnt)
			change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER;
	}

	if (change_gpu_state) {
		KBASE_TRACE_ADD(kbdev, PM_RELEASE_CHANGE_SHADER_INUSE, NULL,
				NULL, 0u, (u32) kbdev->shader_inuse_bitmap);

		kbase_timeline_pm_cores_func(kbdev,
					KBASE_PM_FUNC_ID_RELEASE_CORES_START,
							change_gpu_state);
		kbase_pm_update_cores_state_nolock(kbdev);
		kbase_timeline_pm_cores_func(kbdev,
					KBASE_PM_FUNC_ID_RELEASE_CORES_END,
							change_gpu_state);

		/* Trace that any state change completed immediately */
		kbase_pm_trace_check_and_finish_state_change(kbdev);
	}
}

KBASE_EXPORT_TEST_API(kbase_pm_release_cores);
void kbase_pm_request_cores_sync(struct kbase_device *kbdev,
					bool tiler_required,
					u64 shader_cores)
{
	unsigned long flags;

	kbase_pm_wait_for_poweroff_complete(kbdev);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_pm_request_cores(kbdev, tiler_required, shader_cores);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	kbase_pm_check_transitions_sync(kbdev);
}

KBASE_EXPORT_TEST_API(kbase_pm_request_cores_sync);
void kbase_pm_request_l2_caches(struct kbase_device *kbdev)
{
	unsigned long flags;
	u32 prior_l2_users_count;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	prior_l2_users_count = kbdev->l2_users_count++;

	KBASE_DEBUG_ASSERT(kbdev->l2_users_count != 0);

	/* if the GPU is reset while the l2 is on, l2 will be off but
	 * prior_l2_users_count will be > 0. l2_available_bitmap will have been
	 * set to 0 though by kbase_pm_init_hw */
	if (!prior_l2_users_count || !kbdev->l2_available_bitmap)
		kbase_pm_check_transitions_nolock(kbdev);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	wait_event(kbdev->pm.backend.l2_powered_wait,
					kbdev->pm.backend.l2_powered == 1);

	/* Trace that any state change completed immediately */
	kbase_pm_trace_check_and_finish_state_change(kbdev);
}

KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches);
void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	kbdev->l2_users_count++;
}

KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches_l2_is_on);
void kbase_pm_release_l2_caches(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	KBASE_DEBUG_ASSERT(kbdev->l2_users_count > 0);

	--kbdev->l2_users_count;

	if (!kbdev->l2_users_count) {
		kbase_pm_check_transitions_nolock(kbdev);
		/* Trace that any state change completed immediately */
		kbase_pm_trace_check_and_finish_state_change(kbdev);
	}
}

KBASE_EXPORT_TEST_API(kbase_pm_release_l2_caches);