struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- *val = atomic_read(&dev_priv->gpu_error.reset_counter);
+ *val = i915_reset_counter(&dev_priv->gpu_error);
return 0;
}
* while it is writing to 'i915_wedged'
*/
- if (i915_reset_in_progress(&dev_priv->gpu_error))
+ if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error))
return -EAGAIN;
intel_runtime_pm_get(dev_priv);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
+static inline u32 i915_reset_counter(struct i915_gpu_error *error)
+{
+ return atomic_read(&error->reset_counter);
+}
+
+static inline bool __i915_reset_in_progress(u32 reset)
+{
+ return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
+{
+ return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+}
+
+static inline bool __i915_terminally_wedged(u32 reset)
+{
+ return unlikely(reset & I915_WEDGED);
+}
+
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
- return unlikely(atomic_read(&error->reset_counter)
- & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+ return __i915_reset_in_progress(i915_reset_counter(error));
+}
+
+static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+{
+ return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
}
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
- return atomic_read(&error->reset_counter) & I915_WEDGED;
+ return __i915_terminally_wedged(i915_reset_counter(error));
}
static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
- return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
+ return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
}
static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
{
int ret;
-#define EXIT_COND (!i915_reset_in_progress(error) || \
+#define EXIT_COND (!i915_reset_in_progress_or_wedged(error) || \
i915_terminally_wedged(error))
if (EXIT_COND)
return 0;
i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible)
{
- if (i915_reset_in_progress(error)) {
+ if (i915_reset_in_progress_or_wedged(error)) {
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */
if (!interruptible)
/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
- if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
+ if (reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
/* ... but upgrade the -EAGAIN to an -EIO if the gpu
 * is truly gone. */
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
return ret;
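The hunks above and below all follow the same pattern: sample the reset counter once (typically under struct_mutex), remember it, sleep without the lock, and then compare against a fresh read to decide whether a GPU reset happened in between. A hedged userspace sketch of just that pattern; wait_for_work and the simulated reset are illustrative, not driver code:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint reset_counter;

/* The caller passes the counter value it sampled before dropping its lock;
 * a mismatch afterwards means a reset completed while we slept. */
static int wait_for_work(unsigned snap)
{
	/* ... block until the request completes ... */

	if (snap != atomic_load(&reset_counter))
		return -EAGAIN;	/* reset happened in between: caller retries */
	return 0;
}

int main(void)
{
	unsigned snap = atomic_load(&reset_counter);

	atomic_fetch_add(&reset_counter, 2);	/* simulate a completed reset */
	printf("wait returned %d\n", wait_for_work(snap));
	return 0;
}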
ret = __i915_wait_request(req,
- atomic_read(&dev_priv->gpu_error.reset_counter),
+ i915_reset_counter(&dev_priv->gpu_error),
interruptible, NULL, NULL);
if (ret)
return ret;
if (ret)
return ret;
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ reset_counter = i915_reset_counter(&dev_priv->gpu_error);
if (readonly) {
struct drm_i915_gem_request *req;
}
drm_gem_object_unreference(&obj->base);
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ reset_counter = i915_reset_counter(&dev_priv->gpu_error);
for (i = 0; i < I915_NUM_ENGINES; i++) {
if (obj->last_read_req[i] == NULL)
if (!i915_semaphore_is_enabled(obj->base.dev)) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req,
- atomic_read(&i915->gpu_error.reset_counter),
+ i915_reset_counter(&i915->gpu_error),
i915->mm.interruptible,
NULL,
&i915->rps.semaphores);
target = request;
}
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ reset_counter = i915_reset_counter(&dev_priv->gpu_error);
if (target)
i915_gem_request_reference(target);
spin_unlock(&file_priv->mm.lock);
* the reset in-progress bit is only ever set by code outside of this
* work we don't need to worry about any other races.
*/
- if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
+ if (i915_reset_in_progress_or_wedged(error) && !i915_terminally_wedged(error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
reset_event);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned reset_counter;
bool pending;
- if (i915_reset_in_progress(&dev_priv->gpu_error) ||
- intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+ if (intel_crtc->reset_counter != reset_counter ||
+ __i915_reset_in_progress_or_wedged(reset_counter))
return false;
spin_lock_irq(&dev->event_lock);
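The reordered pageflip check reads the counter exactly once and tests both conditions (stamp mismatch and the in-progress/wedged bits) on that single snapshot, so the two tests can no longer observe different counter values. A small sketch under the same stand-in names as above; flip_still_pending is illustrative, not the driver's function:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RESET_IN_PROGRESS_FLAG	(1u << 0)
#define WEDGED			(1u << 31)

static atomic_uint reset_counter;

/* crtc_stamp is the counter value recorded when the flip was queued */
static bool flip_still_pending(unsigned crtc_stamp)
{
	unsigned snap = atomic_load(&reset_counter);

	if (crtc_stamp != snap ||
	    (snap & (RESET_IN_PROGRESS_FLAG | WEDGED)))
		return false;	/* a reset overtook the queued flip */
	return true;
}

int main(void)
{
	unsigned stamp = atomic_load(&reset_counter);

	atomic_fetch_add(&reset_counter, 2);	/* a reset completes */
	printf("still pending: %d\n", flip_still_pending(stamp));
	return 0;
}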
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned reset_counter;
- if (i915_reset_in_progress(&dev_priv->gpu_error) ||
- crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+ if (crtc->reset_counter != reset_counter ||
+ __i915_reset_in_progress_or_wedged(reset_counter))
return true;
/*
goto cleanup;
atomic_inc(&intel_crtc->unpin_work_count);
- intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
return ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
- if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
+ if (!ret && !async && !i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
u32 reset_counter;
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ reset_counter = i915_reset_counter(&dev_priv->gpu_error);
mutex_unlock(&dev->struct_mutex);
for_each_plane_in_state(state, plane, plane_state, i) {
return;
ret = intel_engine_idle(engine);
- if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
+ if (ret && !i915_reset_in_progress_or_wedged(&dev_priv->gpu_error))
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
engine->name, ret);
/* Make sure we do not trigger any retires */
return __i915_wait_request(req,
- atomic_read(&to_i915(engine->dev)->gpu_error.reset_counter),
- to_i915(engine->dev)->mm.interruptible,
+ i915_reset_counter(&req->i915->gpu_error),
+ req->i915->mm.interruptible,
NULL, NULL);
}
return;
ret = intel_engine_idle(engine);
- if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
+ if (ret &&
+ !i915_reset_in_progress_or_wedged(&to_i915(engine->dev)->gpu_error))
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
engine->name, ret);