/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
- * A copy of the licence is included with the program, and can also be obtained
- * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
*
*/
-
-
/*
* Register-based HW access backend specific APIs
*/
#include <mali_kbase_jm.h>
#include <mali_kbase_js.h>
#include <mali_kbase_tlstream.h>
+#include <mali_kbase_hwcnt_context.h>
#include <mali_kbase_10969_workaround.h>
#include <backend/gpu/mali_kbase_cache_policy_backend.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
-#include <backend/gpu/mali_kbase_js_affinity.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
/* Return whether the specified ringbuffer is empty. HW access lock must be
 * held */
katom->gpu_rb_state = KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB;
- kbase_js_debug_log_current_affinities(kbdev);
-
return katom;
}
return rb->entries[(rb->read_idx + idx) & SLOT_RB_MASK].katom;
}
-struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev,
- int js)
-{
- return kbase_gpu_inspect(kbdev, js, 0);
-}
-
struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
int js)
{
}
-static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev,
- struct kbase_jd_atom *katom);
-
-static bool kbasep_js_job_check_ref_cores(struct kbase_device *kbdev,
- int js,
- struct kbase_jd_atom *katom)
-{
- /* The most recently checked affinity. Having this at this scope allows
- * us to guarantee that we've checked the affinity in this function
- * call.
- */
- u64 recently_chosen_affinity = 0;
- bool chosen_affinity = false;
- bool retry;
-
- do {
- retry = false;
-
- /* NOTE: The following uses a number of FALLTHROUGHs to optimize
- * the calls to this function. Ending of the function is
- * indicated by BREAK OUT */
- switch (katom->coreref_state) {
- /* State when job is first attempted to be run */
- case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
- KBASE_DEBUG_ASSERT(katom->affinity == 0);
-
- /* Compute affinity */
- if (false == kbase_js_choose_affinity(
- &recently_chosen_affinity, kbdev, katom,
- js)) {
- /* No cores are currently available */
- /* *** BREAK OUT: No state transition *** */
- break;
- }
-
- chosen_affinity = true;
-
- /* Request the cores */
- kbase_pm_request_cores(kbdev,
- katom->core_req & BASE_JD_REQ_T,
- recently_chosen_affinity);
-
- katom->affinity = recently_chosen_affinity;
-
- /* Proceed to next state */
- katom->coreref_state =
- KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
-
- /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
-
- case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
- {
- enum kbase_pm_cores_ready cores_ready;
-
- KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
- (katom->core_req & BASE_JD_REQ_T));
-
- cores_ready = kbase_pm_register_inuse_cores(
- kbdev,
- katom->core_req & BASE_JD_REQ_T,
- katom->affinity);
- if (cores_ready == KBASE_NEW_AFFINITY) {
- /* Affinity no longer valid - return to
- * previous state */
- kbasep_js_job_check_deref_cores(kbdev,
- katom);
- KBASE_TRACE_ADD_SLOT_INFO(kbdev,
- JS_CORE_REF_REGISTER_INUSE_FAILED,
- katom->kctx, katom,
- katom->jc, js,
- (u32) katom->affinity);
- /* *** BREAK OUT: Return to previous
- * state, retry *** */
- retry = true;
- break;
- }
- if (cores_ready == KBASE_CORES_NOT_READY) {
- /* Stay in this state and return, to
- * retry at this state later */
- KBASE_TRACE_ADD_SLOT_INFO(kbdev,
- JS_CORE_REF_REGISTER_INUSE_FAILED,
- katom->kctx, katom,
- katom->jc, js,
- (u32) katom->affinity);
- /* *** BREAK OUT: No state transition
- * *** */
- break;
- }
- /* Proceed to next state */
- katom->coreref_state =
- KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
- }
-
- /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
-
- case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
- KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
- (katom->core_req & BASE_JD_REQ_T));
-
- /* Optimize out choosing the affinity twice in the same
- * function call */
- if (chosen_affinity == false) {
- /* See if the affinity changed since a previous
- * call. */
- if (false == kbase_js_choose_affinity(
- &recently_chosen_affinity,
- kbdev, katom, js)) {
- /* No cores are currently available */
- kbasep_js_job_check_deref_cores(kbdev,
- katom);
- KBASE_TRACE_ADD_SLOT_INFO(kbdev,
- JS_CORE_REF_REQUEST_ON_RECHECK_FAILED,
- katom->kctx, katom,
- katom->jc, js,
- (u32) recently_chosen_affinity);
- /* *** BREAK OUT: Transition to lower
- * state *** */
- break;
- }
- chosen_affinity = true;
- }
-
- /* Now see if this requires a different set of cores */
- if (recently_chosen_affinity != katom->affinity) {
- enum kbase_pm_cores_ready cores_ready;
-
- kbase_pm_request_cores(kbdev,
- katom->core_req & BASE_JD_REQ_T,
- recently_chosen_affinity);
-
- /* Register new cores whilst we still hold the
- * old ones, to minimize power transitions */
- cores_ready =
- kbase_pm_register_inuse_cores(kbdev,
- katom->core_req & BASE_JD_REQ_T,
- recently_chosen_affinity);
- kbasep_js_job_check_deref_cores(kbdev, katom);
-
- /* Fixup the state that was reduced by
- * deref_cores: */
- katom->coreref_state =
- KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
- katom->affinity = recently_chosen_affinity;
- if (cores_ready == KBASE_NEW_AFFINITY) {
- /* Affinity no longer valid - return to
- * previous state */
- katom->coreref_state =
- KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
-
- kbasep_js_job_check_deref_cores(kbdev,
- katom);
-
- KBASE_TRACE_ADD_SLOT_INFO(kbdev,
- JS_CORE_REF_REGISTER_INUSE_FAILED,
- katom->kctx, katom,
- katom->jc, js,
- (u32) katom->affinity);
- /* *** BREAK OUT: Return to previous
- * state, retry *** */
- retry = true;
- break;
- }
- /* Now might be waiting for powerup again, with
- * a new affinity */
- if (cores_ready == KBASE_CORES_NOT_READY) {
- /* Return to previous state */
- katom->coreref_state =
- KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
- KBASE_TRACE_ADD_SLOT_INFO(kbdev,
- JS_CORE_REF_REGISTER_ON_RECHECK_FAILED,
- katom->kctx, katom,
- katom->jc, js,
- (u32) katom->affinity);
- /* *** BREAK OUT: Transition to lower
- * state *** */
- break;
- }
- }
- /* Proceed to next state */
- katom->coreref_state =
- KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS;
-
- /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
- case KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS:
- KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
- (katom->core_req & BASE_JD_REQ_T));
- KBASE_DEBUG_ASSERT(katom->affinity ==
- recently_chosen_affinity);
-
- /* Note: this is where the caller must've taken the
- * hwaccess_lock */
-
- /* Check for affinity violations - if there are any,
- * then we just ask the caller to requeue and try again
- * later */
- if (kbase_js_affinity_would_violate(kbdev, js,
- katom->affinity) != false) {
- /* Return to previous state */
- katom->coreref_state =
- KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
- /* *** BREAK OUT: Transition to lower state ***
- */
- KBASE_TRACE_ADD_SLOT_INFO(kbdev,
- JS_CORE_REF_AFFINITY_WOULD_VIOLATE,
- katom->kctx, katom, katom->jc, js,
- (u32) katom->affinity);
- break;
- }
-
- /* No affinity violations would result, so the cores are
- * ready */
- katom->coreref_state = KBASE_ATOM_COREREF_STATE_READY;
- /* *** BREAK OUT: Cores Ready *** */
- break;
-
- default:
- KBASE_DEBUG_ASSERT_MSG(false,
- "Unhandled kbase_atom_coreref_state %d",
- katom->coreref_state);
- break;
- }
- } while (retry != false);
-
- return (katom->coreref_state == KBASE_ATOM_COREREF_STATE_READY);
-}
-
-static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev,
- struct kbase_jd_atom *katom)
-{
- KBASE_DEBUG_ASSERT(kbdev != NULL);
- KBASE_DEBUG_ASSERT(katom != NULL);
-
- switch (katom->coreref_state) {
- case KBASE_ATOM_COREREF_STATE_READY:
- /* State where atom was submitted to the HW - just proceed to
- * power-down */
- KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
- (katom->core_req & BASE_JD_REQ_T));
-
- /* *** FALLTHROUGH *** */
-
- case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
- /* State where cores were registered */
- KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
- (katom->core_req & BASE_JD_REQ_T));
- kbase_pm_release_cores(kbdev, katom->core_req & BASE_JD_REQ_T,
- katom->affinity);
-
- break;
-
- case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
- /* State where cores were requested, but not registered */
- KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
- (katom->core_req & BASE_JD_REQ_T));
- kbase_pm_unrequest_cores(kbdev, katom->core_req & BASE_JD_REQ_T,
- katom->affinity);
- break;
-
- case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
- /* Initial state - nothing required */
- KBASE_DEBUG_ASSERT(katom->affinity == 0);
- break;
-
- default:
- KBASE_DEBUG_ASSERT_MSG(false,
- "Unhandled coreref_state: %d",
- katom->coreref_state);
- break;
- }
-
- katom->affinity = 0;
- katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
-}
-
-static void kbasep_js_job_check_deref_cores_nokatom(struct kbase_device *kbdev,
- base_jd_core_req core_req, u64 affinity,
- enum kbase_atom_coreref_state coreref_state)
-{
- KBASE_DEBUG_ASSERT(kbdev != NULL);
-
- switch (coreref_state) {
- case KBASE_ATOM_COREREF_STATE_READY:
- /* State where atom was submitted to the HW - just proceed to
- * power-down */
- KBASE_DEBUG_ASSERT(affinity != 0 ||
- (core_req & BASE_JD_REQ_T));
-
- /* *** FALLTHROUGH *** */
-
- case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
- /* State where cores were registered */
- KBASE_DEBUG_ASSERT(affinity != 0 ||
- (core_req & BASE_JD_REQ_T));
- kbase_pm_release_cores(kbdev, core_req & BASE_JD_REQ_T,
- affinity);
-
- break;
-
- case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
- /* State where cores were requested, but not registered */
- KBASE_DEBUG_ASSERT(affinity != 0 ||
- (core_req & BASE_JD_REQ_T));
- kbase_pm_unrequest_cores(kbdev, core_req & BASE_JD_REQ_T,
- affinity);
- break;
-
- case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
- /* Initial state - nothing required */
- KBASE_DEBUG_ASSERT(affinity == 0);
- break;
-
- default:
- KBASE_DEBUG_ASSERT_MSG(false,
- "Unhandled coreref_state: %d",
- coreref_state);
- break;
- }
-}
-
static void kbase_gpu_release_atom(struct kbase_device *kbdev,
struct kbase_jd_atom *katom,
ktime_t *end_timestamp)
{
struct kbase_context *kctx = katom->kctx;
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
switch (katom->gpu_rb_state) {
case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB:
/* Should be impossible */
/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
- kbase_js_affinity_release_slot_cores(kbdev, katom->slot_nr,
- katom->affinity);
/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
break;
case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION:
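+		/* If the atom has already progressed part-way through a
+		 * protected mode transition (entry or exit), the transition
+		 * is being abandoned, so drop the PM protected-mode override
+		 * and recalculate the desired core state.
+		 */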
+ if (kbase_jd_katom_is_protected(katom) &&
+ (katom->protected_state.enter !=
+ KBASE_ATOM_ENTER_PROTECTED_CHECK) &&
+ (katom->protected_state.enter !=
+ KBASE_ATOM_ENTER_PROTECTED_HWCNT)) {
+ kbase_pm_protected_override_disable(kbdev);
+ kbase_pm_update_cores_state_nolock(kbdev);
+ }
+ if (!kbase_jd_katom_is_protected(katom) &&
+ (katom->protected_state.exit !=
+ KBASE_ATOM_EXIT_PROTECTED_CHECK) &&
+ (katom->protected_state.exit !=
+ KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT)) {
+ kbase_pm_protected_override_disable(kbdev);
+ kbase_pm_update_cores_state_nolock(kbdev);
+ }
+
if (katom->protected_state.enter !=
KBASE_ATOM_ENTER_PROTECTED_CHECK ||
katom->protected_state.exit !=
KBASE_ATOM_EXIT_PROTECTED_CHECK)
kbdev->protected_mode_transition = false;
-
+ /* If the atom has suspended hwcnt but has not yet entered
+ * protected mode, then resume hwcnt now. If the GPU is now in
+ * protected mode then hwcnt will be resumed by GPU reset so
+ * don't resume it here.
+ */
if (kbase_jd_katom_is_protected(katom) &&
- (katom->protected_state.enter ==
- KBASE_ATOM_ENTER_PROTECTED_IDLE_L2)) {
- kbase_vinstr_resume(kbdev->vinstr_ctx);
-#ifdef CONFIG_DEVFREQ_THERMAL
- /* Go back to configured model for IPA */
- kbase_ipa_model_use_configured_locked(kbdev);
-#endif
+ ((katom->protected_state.enter ==
+ KBASE_ATOM_ENTER_PROTECTED_IDLE_L2) ||
+ (katom->protected_state.enter ==
+ KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY))) {
+ WARN_ON(!kbdev->protected_mode_hwcnt_disabled);
+ kbdev->protected_mode_hwcnt_desired = true;
+ if (kbdev->protected_mode_hwcnt_disabled) {
+ kbase_hwcnt_context_enable(
+ kbdev->hwcnt_gpu_ctx);
+ kbdev->protected_mode_hwcnt_disabled = false;
+ }
}
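+	/* For GPUs affected by BASE_HW_ISSUE_TGOX_R1_1234, the atom may still
+	 * hold the extra L2 reference taken while entering protected mode;
+	 * release it here.
+	 */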
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
+ if (katom->atom_flags &
+ KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT) {
+ kbase_pm_protected_l2_override(kbdev, false);
+ katom->atom_flags &=
+ ~KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT;
+ }
+ }
/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
}
katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED;
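+	/* Reset the exit-transition state so that, if the atom is
+	 * resubmitted, the exit state machine restarts from the beginning.
+	 */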
+ katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
}
static void kbase_gpu_mark_atom_for_return(struct kbase_device *kbdev,
struct kbase_jd_atom *katom)
{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
kbase_gpu_release_atom(kbdev, katom, NULL);
katom->gpu_rb_state = KBASE_ATOM_GPU_RB_RETURN_TO_JS;
}
return kbdev->protected_mode;
}
-static int kbase_gpu_protected_mode_enter(struct kbase_device *kbdev)
+static void kbase_gpu_disable_coherent(struct kbase_device *kbdev)
{
- int err = -EINVAL;
-
lockdep_assert_held(&kbdev->hwaccess_lock);
- WARN_ONCE(!kbdev->protected_ops,
- "Cannot enter protected mode: protected callbacks not specified.\n");
-
/*
* When entering into protected mode, we must ensure that the
 * GPU is not operating in coherent mode as well. This is to
 * ensure that no protected memory can be leaked.
 */
if (kbdev->system_coherency == COHERENCY_ACE)
kbase_cache_set_coherency_mode(kbdev, COHERENCY_ACE_LITE);
+}
+
+static int kbase_gpu_protected_mode_enter(struct kbase_device *kbdev)
+{
+ int err = -EINVAL;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ WARN_ONCE(!kbdev->protected_ops,
+ "Cannot enter protected mode: protected callbacks not specified.\n");
if (kbdev->protected_ops) {
/* Switch GPU to protected mode */
- err = kbdev->protected_ops->protected_mode_enter(kbdev);
+ err = kbdev->protected_ops->protected_mode_enable(
+ kbdev->protected_dev);
- if (err)
+ if (err) {
dev_warn(kbdev->dev, "Failed to enable protected mode: %d\n",
err);
- else
+ } else {
kbdev->protected_mode = true;
+ kbase_ipa_protection_mode_switch_event(kbdev);
+ }
}
return err;
if (!kbdev->protected_ops)
return -EINVAL;
- kbase_reset_gpu_silent(kbdev);
+	/* The protected mode disable callback will be called as part of the
+	 * reset.
+	 */
+ return kbase_reset_gpu_silent(kbdev);
+}
- return 0;
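+/**
+ * kbase_jm_protected_entry - Perform the final switch into protected mode,
+ *                            ending the transition and failing the atom if
+ *                            the switch did not succeed.
+ * @kbdev: kbase device
+ * @katom: slot ringbuffer atoms
+ * @idx:   index of the atom being transitioned
+ * @js:    job slot
+ *
+ * Return: 0 on success, or -EINVAL if the GPU failed to enter protected mode.
+ */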
+static int kbase_jm_protected_entry(struct kbase_device *kbdev,
+ struct kbase_jd_atom **katom, int idx, int js)
+{
+ int err = 0;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ err = kbase_gpu_protected_mode_enter(kbdev);
+
+ /*
+	 * Regardless of the result of the call above, we are no
+	 * longer transitioning the GPU.
+ */
+
+ kbdev->protected_mode_transition = false;
+ kbase_pm_protected_override_disable(kbdev);
+ kbase_pm_update_cores_state_nolock(kbdev);
+
+ KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END(kbdev);
+ if (err) {
+ /*
+ * Failed to switch into protected mode, resume
+ * GPU hwcnt and fail atom.
+ */
+ WARN_ON(!kbdev->protected_mode_hwcnt_disabled);
+ kbdev->protected_mode_hwcnt_desired = true;
+ if (kbdev->protected_mode_hwcnt_disabled) {
+ kbase_hwcnt_context_enable(
+ kbdev->hwcnt_gpu_ctx);
+ kbdev->protected_mode_hwcnt_disabled = false;
+ }
+
+ katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
+ kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
+ /*
+ * Only return if head atom or previous atom
+ * already removed - as atoms must be returned
+ * in order.
+ */
+ if (idx == 0 || katom[0]->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ kbase_jm_return_atom_to_js(kbdev, katom[idx]);
+ }
+
+ return -EINVAL;
+ }
+
+ /*
+ * Protected mode sanity checks.
+ */
+ KBASE_DEBUG_ASSERT_MSG(
+ kbase_jd_katom_is_protected(katom[idx]) ==
+ kbase_gpu_in_protected_mode(kbdev),
+ "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
+ kbase_jd_katom_is_protected(katom[idx]),
+ kbase_gpu_in_protected_mode(kbdev));
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_READY;
+
+ return err;
}
static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
{
int err = 0;
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
switch (katom[idx]->protected_state.enter) {
case KBASE_ATOM_ENTER_PROTECTED_CHECK:
KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START(kbdev);
* there are no atoms currently on the GPU. */
WARN_ON(kbdev->protected_mode_transition);
WARN_ON(kbase_gpu_atoms_submitted_any(kbdev));
+ /* If hwcnt is disabled, it means we didn't clean up correctly
+ * during last exit from protected mode.
+ */
+ WARN_ON(kbdev->protected_mode_hwcnt_disabled);
- kbdev->protected_mode_transition = true;
katom[idx]->protected_state.enter =
- KBASE_ATOM_ENTER_PROTECTED_VINSTR;
+ KBASE_ATOM_ENTER_PROTECTED_HWCNT;
+
+ kbdev->protected_mode_transition = true;
/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
- case KBASE_ATOM_ENTER_PROTECTED_VINSTR:
- if (kbase_vinstr_try_suspend(kbdev->vinstr_ctx) < 0) {
- /*
- * We can't switch now because
- * the vinstr core state switch
- * is not done yet.
- */
- return -EAGAIN;
+ case KBASE_ATOM_ENTER_PROTECTED_HWCNT:
+ /* See if we can get away with disabling hwcnt atomically */
+ kbdev->protected_mode_hwcnt_desired = false;
+ if (!kbdev->protected_mode_hwcnt_disabled) {
+ if (kbase_hwcnt_context_disable_atomic(
+ kbdev->hwcnt_gpu_ctx))
+ kbdev->protected_mode_hwcnt_disabled = true;
}
-#ifdef CONFIG_DEVFREQ_THERMAL
- /* Use generic model for IPA in protected mode */
- kbase_ipa_model_use_fallback_locked(kbdev);
+ /* We couldn't disable atomically, so kick off a worker */
+ if (!kbdev->protected_mode_hwcnt_disabled) {
+#if KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE
+ queue_work(system_wq,
+ &kbdev->protected_mode_hwcnt_disable_work);
+#else
+ queue_work(system_highpri_wq,
+ &kbdev->protected_mode_hwcnt_disable_work);
#endif
+ return -EAGAIN;
+ }
/* Once reaching this point GPU must be
- * switched to protected mode or vinstr
+ * switched to protected mode or hwcnt
* re-enabled. */
/*
katom[idx]->protected_state.enter =
KBASE_ATOM_ENTER_PROTECTED_IDLE_L2;
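+		/* Enable the PM protected-mode override so that the power
+		 * management code idles the L2; the IDLE_L2 state below waits
+		 * for the L2 to power down.
+		 */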
+ kbase_pm_protected_override_enable(kbdev);
kbase_pm_update_cores_state_nolock(kbdev);
/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) ||
kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
/*
- * The L2 is still powered, wait for all the users to
- * finish with it before doing the actual reset.
- */
+ * The L2 is still powered, wait for all the users to
+ * finish with it before doing the actual reset.
+ */
return -EAGAIN;
}
}
katom[idx]->protected_state.enter =
- KBASE_ATOM_ENTER_PROTECTED_FINISHED;
+ KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY;
/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
- case KBASE_ATOM_ENTER_PROTECTED_FINISHED:
-
- /* No jobs running, so we can switch GPU mode right now. */
- err = kbase_gpu_protected_mode_enter(kbdev);
-
+ case KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY:
/*
- * Regardless of result, we are no longer transitioning
- * the GPU.
+ * When entering into protected mode, we must ensure that the
+ * GPU is not operating in coherent mode as well. This is to
+ * ensure that no protected memory can be leaked.
*/
- kbdev->protected_mode_transition = false;
- KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END(kbdev);
- if (err) {
+ kbase_gpu_disable_coherent(kbdev);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
/*
- * Failed to switch into protected mode, resume
- * vinstr core and fail atom.
+ * Power on L2 caches; this will also result in the
+ * correct value written to coherency enable register.
*/
- kbase_vinstr_resume(kbdev->vinstr_ctx);
- katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
- kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
- /* Only return if head atom or previous atom
- * already removed - as atoms must be returned
- * in order. */
- if (idx == 0 || katom[0]->gpu_rb_state ==
- KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
- kbase_gpu_dequeue_atom(kbdev, js, NULL);
- kbase_jm_return_atom_to_js(kbdev, katom[idx]);
- }
-#ifdef CONFIG_DEVFREQ_THERMAL
- /* Go back to configured model for IPA */
- kbase_ipa_model_use_configured_locked(kbdev);
-#endif
+ kbase_pm_protected_l2_override(kbdev, true);
- return -EINVAL;
+ /*
+ * Set the flag on the atom that additional
+ * L2 references are taken.
+ */
+ katom[idx]->atom_flags |=
+ KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT;
}
- /* Protected mode sanity checks. */
- KBASE_DEBUG_ASSERT_MSG(
- kbase_jd_katom_is_protected(katom[idx]) ==
- kbase_gpu_in_protected_mode(kbdev),
- "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
- kbase_jd_katom_is_protected(katom[idx]),
- kbase_gpu_in_protected_mode(kbdev));
- katom[idx]->gpu_rb_state =
- KBASE_ATOM_GPU_RB_READY;
+ katom[idx]->protected_state.enter =
+ KBASE_ATOM_ENTER_PROTECTED_FINISHED;
+
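+		/* On GPUs affected by BASE_HW_ISSUE_TGOX_R1_1234, wait for
+		 * the L2 to power up; the FINISHED state below checks for
+		 * this before entering protected mode.
+		 */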
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234))
+ return -EAGAIN;
+
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_ENTER_PROTECTED_FINISHED:
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
+ /*
+ * Check that L2 caches are powered and, if so,
+ * enter protected mode.
+ */
+ if (kbdev->pm.backend.l2_state == KBASE_L2_ON) {
+ /*
+ * Remove additional L2 reference and reset
+ * the atom flag which denotes it.
+ */
+ if (katom[idx]->atom_flags &
+ KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT) {
+ kbase_pm_protected_l2_override(kbdev,
+ false);
+ katom[idx]->atom_flags &=
+ ~KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT;
+ }
+
+ err = kbase_jm_protected_entry(kbdev, katom, idx, js);
+
+ if (err)
+ return err;
+ } else {
+ /*
+				 * Still waiting for the L2 caches to power up.
+ */
+ return -EAGAIN;
+ }
+ } else {
+ err = kbase_jm_protected_entry(kbdev, katom, idx, js);
+
+ if (err)
+ return err;
+ }
}
return 0;
{
int err = 0;
+ lockdep_assert_held(&kbdev->hwaccess_lock);
switch (katom[idx]->protected_state.exit) {
case KBASE_ATOM_EXIT_PROTECTED_CHECK:
KBASE_ATOM_EXIT_PROTECTED_IDLE_L2;
kbdev->protected_mode_transition = true;
+ kbase_pm_protected_override_enable(kbdev);
kbase_pm_update_cores_state_nolock(kbdev);
/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
/* Issue the reset to the GPU */
err = kbase_gpu_protected_mode_reset(kbdev);
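+		/* kbase_gpu_protected_mode_reset() returns -EAGAIN if the
+		 * reset could not be issued yet (e.g. another reset is
+		 * already in progress); retry the transition later.
+		 */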
+ if (err == -EAGAIN)
+ return -EAGAIN;
+
if (err) {
kbdev->protected_mode_transition = false;
+ kbase_pm_protected_override_disable(kbdev);
/* Failed to exit protected mode, fail atom */
katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
kbase_jm_return_atom_to_js(kbdev, katom[idx]);
}
- kbase_vinstr_resume(kbdev->vinstr_ctx);
-#ifdef CONFIG_DEVFREQ_THERMAL
- /* Use generic model for IPA in protected mode */
- kbase_ipa_model_use_fallback_locked(kbdev);
-#endif
+ /* If we're exiting from protected mode, hwcnt must have
+ * been disabled during entry.
+ */
+ WARN_ON(!kbdev->protected_mode_hwcnt_disabled);
+ kbdev->protected_mode_hwcnt_desired = true;
+ if (kbdev->protected_mode_hwcnt_disabled) {
+ kbase_hwcnt_context_enable(
+ kbdev->hwcnt_gpu_ctx);
+ kbdev->protected_mode_hwcnt_disabled = false;
+ }
return -EINVAL;
}
/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
case KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT:
- if (kbase_reset_gpu_active(kbdev))
- return -EAGAIN;
-
- kbdev->protected_mode_transition = false;
- kbdev->protected_mode = false;
- KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(kbdev);
- /* protected mode sanity checks */
- KBASE_DEBUG_ASSERT_MSG(
- kbase_jd_katom_is_protected(katom[idx]) == kbase_gpu_in_protected_mode(kbdev),
- "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
- kbase_jd_katom_is_protected(katom[idx]), kbase_gpu_in_protected_mode(kbdev));
- KBASE_DEBUG_ASSERT_MSG(
- (kbase_jd_katom_is_protected(katom[idx]) && js == 0) ||
- !kbase_jd_katom_is_protected(katom[idx]),
- "Protected atom on JS%d not supported", js);
+ /* A GPU reset is issued when exiting protected mode. Once the
+ * reset is done all atoms' state will also be reset. For this
+ * reason, if the atom is still in this state we can safely
+ * say that the reset has not completed i.e., we have not
+ * finished exiting protected mode yet.
+ */
+ return -EAGAIN;
}
return 0;
lockdep_assert_held(&kbdev->hwaccess_lock);
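+	/* A GPU reset is in progress: don't submit or update slot state
+	 * until the reset handler has returned the atoms to the job
+	 * scheduler.
+	 */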
+ if (kbase_reset_gpu_active(kbdev))
+ return;
+
for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
struct kbase_jd_atom *katom[2];
int idx;
break;
}
- cores_ready =
- kbasep_js_job_check_ref_cores(kbdev, js,
- katom[idx]);
+ cores_ready = kbase_pm_cores_requested(kbdev,
+ true);
if (katom[idx]->event_code ==
BASE_JD_EVENT_PM_EVENT) {
if (!cores_ready)
break;
- kbase_js_affinity_retain_slot_cores(kbdev, js,
- katom[idx]->affinity);
katom[idx]->gpu_rb_state =
KBASE_ATOM_GPU_RB_WAITING_AFFINITY;
#define HAS_DEP(katom) (katom->pre_dep || katom->atom_flags & \
(KBASE_KATOM_FLAG_X_DEP_BLOCKED | KBASE_KATOM_FLAG_FAIL_BLOCKER))
-bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js)
+bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js,
+ u32 completion_code)
{
struct kbase_jd_atom *katom;
struct kbase_jd_atom *next_katom;
if (next_katom && katom->kctx == next_katom->kctx &&
next_katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED &&
- HAS_DEP(next_katom) &&
- (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), NULL)
+ (HAS_DEP(next_katom) || next_katom->sched_priority ==
+ katom->sched_priority) &&
+ (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO))
!= 0 ||
- kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), NULL)
+ kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI))
!= 0)) {
kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT),
- JS_COMMAND_NOP, NULL);
+ JS_COMMAND_NOP);
next_katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY;
- KBASE_TLSTREAM_TL_NRET_ATOM_LPU(katom,
+ if (completion_code == BASE_JD_EVENT_STOPPED) {
+ KBASE_TLSTREAM_TL_NRET_ATOM_LPU(next_katom,
&kbdev->gpu_props.props.raw_props.js_features
- [katom->slot_nr]);
- KBASE_TLSTREAM_TL_NRET_ATOM_AS(katom, &kbdev->as
- [katom->kctx->as_nr]);
- KBASE_TLSTREAM_TL_NRET_CTX_LPU(katom->kctx,
+ [next_katom->slot_nr]);
+ KBASE_TLSTREAM_TL_NRET_ATOM_AS(next_katom, &kbdev->as
+ [next_katom->kctx->as_nr]);
+ KBASE_TLSTREAM_TL_NRET_CTX_LPU(next_katom->kctx,
&kbdev->gpu_props.props.raw_props.js_features
- [katom->slot_nr]);
+ [next_katom->slot_nr]);
+ }
+
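+		/* Drop the cycle counter reference taken when the evicted
+		 * atom was submitted with performance monitoring enabled; it
+		 * will be re-taken if the atom is submitted again.
+		 */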
+ if (next_katom->core_req & BASE_JD_REQ_PERMON)
+ kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
return true;
}
lockdep_assert_held(&kbdev->hwaccess_lock);
+ /*
+ * When a hard-stop is followed close after a soft-stop, the completion
+ * code may be set to STOPPED, even though the job is terminated
+ */
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TMIX_8438)) {
+ if (completion_code == BASE_JD_EVENT_STOPPED &&
+ (katom->atom_flags &
+ KBASE_KATOM_FLAG_BEEN_HARD_STOPPED)) {
+ completion_code = BASE_JD_EVENT_TERMINATED;
+ }
+ }
+
if ((kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6787) || (katom->core_req &
BASE_JD_REQ_SKIP_CACHE_END)) &&
completion_code != BASE_JD_EVENT_DONE &&
* flushed. To prevent future evictions causing possible memory
* corruption we need to flush the cache manually before any
* affected memory gets reused. */
- katom->need_cache_flush_cores_retained = katom->affinity;
- kbase_pm_request_cores(kbdev, false, katom->affinity);
+ katom->need_cache_flush_cores_retained = true;
} else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10676)) {
if (kbdev->gpu_props.num_core_groups > 1 &&
- !(katom->affinity &
- kbdev->gpu_props.props.coherency_info.group[0].core_mask
- ) &&
- (katom->affinity &
- kbdev->gpu_props.props.coherency_info.group[1].core_mask
- )) {
+ katom->device_nr >= 1) {
dev_info(kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n");
- katom->need_cache_flush_cores_retained =
- katom->affinity;
- kbase_pm_request_cores(kbdev, false,
- katom->affinity);
+ katom->need_cache_flush_cores_retained = true;
}
}
katom = kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
- kbase_timeline_job_slot_done(kbdev, katom->kctx, katom, js, 0);
if (completion_code == BASE_JD_EVENT_STOPPED) {
struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js,
if (next_katom && katom->kctx == next_katom->kctx &&
next_katom->sched_priority ==
katom->sched_priority) {
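+			/* The successor atom must not still be submitted to
+			 * the hardware at this point; kbase_gpu_irq_evict()
+			 * should already have pulled it off the GPU.
+			 */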
+ WARN_ON(next_katom->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_SUBMITTED);
kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
kbase_jm_return_atom_to_js(kbdev, next_katom);
}
struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
int i;
+ if (!kbase_ctx_flag(katom->kctx, KCTX_DYING))
+		dev_warn(kbdev->dev, "error detected from slot %d, job status 0x%08x (%s)",
+			 js, completion_code,
+			 kbase_exception_name(kbdev, completion_code));
+
#if KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR != 0
KBASE_TRACE_DUMP(kbdev);
#endif
if (katom->event_code != BASE_JD_EVENT_JOB_CANCELLED)
katom->event_code = (base_jd_event_code)completion_code;
- kbase_device_trace_register_access(kctx, REG_WRITE,
- JOB_CONTROL_REG(JOB_IRQ_CLEAR),
- 1 << js);
-
/* Complete the job, and start new ones
*
* Also defer remaining work onto the workqueue:
lockdep_assert_held(&kbdev->hwaccess_lock);
+ /* Reset should always take the GPU out of protected mode */
+ WARN_ON(kbase_gpu_in_protected_mode(kbdev));
+
for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
int atom_idx = 0;
int idx;
if (!katom)
break;
if (katom->protected_state.exit ==
- KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT)
- KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(kbdev);
- if (katom->gpu_rb_state < KBASE_ATOM_GPU_RB_SUBMITTED)
+ KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT) {
+ /* protected mode sanity checks */
+ KBASE_DEBUG_ASSERT_MSG(
+ kbase_jd_katom_is_protected(katom) == kbase_gpu_in_protected_mode(kbdev),
+ "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
+ kbase_jd_katom_is_protected(katom), kbase_gpu_in_protected_mode(kbdev));
+ KBASE_DEBUG_ASSERT_MSG(
+ (kbase_jd_katom_is_protected(katom) && js == 0) ||
+ !kbase_jd_katom_is_protected(katom),
+ "Protected atom on JS%d not supported", js);
+ }
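+			/* Atoms from a dying context are never kept for
+			 * resubmission after the reset.
+			 */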
+ if ((katom->gpu_rb_state < KBASE_ATOM_GPU_RB_SUBMITTED) &&
+ !kbase_ctx_flag(katom->kctx, KCTX_DYING))
keep_in_jm_rb = true;
kbase_gpu_release_atom(kbdev, katom, NULL);
* it will be processed again from the starting state.
*/
if (keep_in_jm_rb) {
- kbasep_js_job_check_deref_cores(kbdev, katom);
- katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
- katom->affinity = 0;
katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
/* As the atom was not removed, increment the
* index so that we read the correct atom in the
}
}
+ /* Re-enable GPU hardware counters if we're resetting from protected
+ * mode.
+ */
+ kbdev->protected_mode_hwcnt_desired = true;
+ if (kbdev->protected_mode_hwcnt_disabled) {
+ kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+ kbdev->protected_mode_hwcnt_disabled = false;
+
+ KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(kbdev);
+ }
+
kbdev->protected_mode_transition = false;
- kbdev->protected_mode = false;
+ kbase_pm_protected_override_disable(kbdev);
}
static inline void kbase_gpu_stop_atom(struct kbase_device *kbdev,
u32 action,
bool disjoint)
{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;
kbase_gpu_mark_atom_for_return(kbdev, katom);
katom->kctx->blocked_js[katom->slot_nr][katom->sched_priority] = true;
return -1;
}
-static void kbase_job_evicted(struct kbase_jd_atom *katom)
-{
- kbase_timeline_job_slot_done(katom->kctx->kbdev, katom->kctx, katom,
- katom->slot_nr, KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT);
-}
-
bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
struct kbase_context *kctx,
int js,
katom_idx0->kctx->blocked_js[js][prio_idx0] = true;
} else {
/* katom_idx0 is on GPU */
- if (katom_idx1 && katom_idx1->gpu_rb_state ==
+ if (katom_idx1_valid && katom_idx1->gpu_rb_state ==
KBASE_ATOM_GPU_RB_SUBMITTED) {
/* katom_idx0 and katom_idx1 are on GPU */
if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
- JS_COMMAND_NEXT), NULL) == 0) {
+ JS_COMMAND_NEXT)) == 0) {
/* idx0 has already completed - stop
 * idx1 if needed */
if (katom_idx1_valid) {
kbase_reg_write(kbdev,
JOB_SLOT_REG(js,
JS_COMMAND_NEXT),
- JS_COMMAND_NOP, NULL);
+ JS_COMMAND_NOP);
if (kbase_reg_read(kbdev,
JOB_SLOT_REG(js,
- JS_HEAD_NEXT_LO), NULL)
+ JS_HEAD_NEXT_LO))
!= 0 ||
kbase_reg_read(kbdev,
JOB_SLOT_REG(js,
- JS_HEAD_NEXT_HI), NULL)
+ JS_HEAD_NEXT_HI))
!= 0) {
/* idx1 removed successfully,
* will be handled in IRQ */
- kbase_job_evicted(katom_idx1);
kbase_gpu_remove_atom(kbdev,
katom_idx1,
action, true);
} else {
/* idx1 is on GPU */
if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
- JS_COMMAND_NEXT), NULL) == 0) {
+ JS_COMMAND_NEXT)) == 0) {
/* idx0 has already completed - stop idx1 */
kbase_gpu_stop_atom(kbdev, js, katom_idx1,
action);
* remove */
kbase_reg_write(kbdev, JOB_SLOT_REG(js,
JS_COMMAND_NEXT),
- JS_COMMAND_NOP, NULL);
+ JS_COMMAND_NOP);
if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
- JS_HEAD_NEXT_LO), NULL) != 0 ||
+ JS_HEAD_NEXT_LO)) != 0 ||
kbase_reg_read(kbdev, JOB_SLOT_REG(js,
- JS_HEAD_NEXT_HI), NULL) != 0) {
+ JS_HEAD_NEXT_HI)) != 0) {
/* idx1 removed successfully, will be
* handled in IRQ once idx0 completes */
- kbase_job_evicted(katom_idx1);
kbase_gpu_remove_atom(kbdev, katom_idx1,
action,
false);
return ret;
}
-void kbase_gpu_cacheclean(struct kbase_device *kbdev)
-{
- /* Limit the number of loops to avoid a hang if the interrupt is missed
- */
- u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
-
- mutex_lock(&kbdev->cacheclean_lock);
-
- /* use GPU_COMMAND completion solution */
- /* clean & invalidate the caches */
- KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_CLEAN_INV_CACHES, NULL);
-
- /* wait for cache flush to complete before continuing */
- while (--max_loops &&
- (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) &
- CLEAN_CACHES_COMPLETED) == 0)
- ;
-
- /* clear the CLEAN_CACHES_COMPLETED irq */
- KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u,
- CLEAN_CACHES_COMPLETED);
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR),
- CLEAN_CACHES_COMPLETED, NULL);
- KBASE_DEBUG_ASSERT_MSG(kbdev->hwcnt.backend.state !=
- KBASE_INSTR_STATE_CLEANING,
- "Instrumentation code was cleaning caches, but Job Management code cleared their IRQ - Instrumentation code will now hang.");
-
- mutex_unlock(&kbdev->cacheclean_lock);
-}
-
-void kbase_backend_cacheclean(struct kbase_device *kbdev,
+void kbase_backend_cache_clean(struct kbase_device *kbdev,
struct kbase_jd_atom *katom)
{
if (katom->need_cache_flush_cores_retained) {
- unsigned long flags;
-
- kbase_gpu_cacheclean(kbdev);
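+		/* Start an asynchronous clean & invalidate of the GPU caches
+		 * and wait for it to complete, replacing the synchronous
+		 * kbase_gpu_cacheclean() helper removed above.
+		 */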
+ kbase_gpu_start_cache_clean(kbdev);
+ kbase_gpu_wait_cache_clean(kbdev);
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbase_pm_unrequest_cores(kbdev, false,
- katom->need_cache_flush_cores_retained);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- katom->need_cache_flush_cores_retained = 0;
+ katom->need_cache_flush_cores_retained = false;
}
}
* If cache flush required due to HW workaround then perform the flush
* now
*/
- kbase_backend_cacheclean(kbdev, katom);
+ kbase_backend_cache_clean(kbdev, katom);
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10969) &&
(katom->core_req & BASE_JD_REQ_FS) &&
katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN;
}
}
-
- /* Clear the coreref_state now - while check_deref_cores() may not have
- * been called yet, the caller will have taken a copy of this field. If
- * this is not done, then if the atom is re-scheduled (following a soft
- * stop) then the core reference would not be retaken. */
- katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
- katom->affinity = 0;
}
void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
- base_jd_core_req core_req, u64 affinity,
- enum kbase_atom_coreref_state coreref_state)
+ base_jd_core_req core_req)
{
- unsigned long flags;
-
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbasep_js_job_check_deref_cores_nokatom(kbdev, core_req, affinity,
- coreref_state);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-
if (!kbdev->pm.active_count) {
mutex_lock(&kbdev->js_data.runpool_mutex);
mutex_lock(&kbdev->pm.lock);
void kbase_gpu_dump_slots(struct kbase_device *kbdev)
{
- struct kbasep_js_device_data *js_devdata;
unsigned long flags;
int js;
- js_devdata = &kbdev->js_data;
-
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
dev_info(kbdev->dev, "kbase_gpu_dump_slots:\n");
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
-
-
-