void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
- unsigned crtc_mask = 0;
- struct drm_crtc *crtc;
int ret;
bool global = false;
- drm_for_each_crtc(crtc, dev) {
- if (crtc->acquire_ctx != state->acquire_ctx)
- continue;
-
- crtc_mask |= drm_crtc_mask(crtc);
- crtc->acquire_ctx = NULL;
- }
-
if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
global = true;

dev->mode_config.acquire_ctx = NULL;
}

retry:
drm_modeset_backoff(state->acquire_ctx);

ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
if (ret)
goto retry;

- drm_for_each_crtc(crtc, dev)
- if (drm_crtc_mask(crtc) & crtc_mask)
- crtc->acquire_ctx = state->acquire_ctx;
-
if (global)
dev->mode_config.acquire_ctx = state->acquire_ctx;
}
if (!state)
return -ENOMEM;
- state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+ state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
retry:
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
-/**
- * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
- * @crtc: drm crtc
- *
- * Legacy ioctl operations like cursor updates or page flips only have per-crtc
- * locking, and store the acquire ctx in the corresponding crtc. All other
- * legacy operations take all locks and use a global acquire context. This
- * function grabs the right one.
- */
-struct drm_modeset_acquire_ctx *
-drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
-{
- if (crtc->acquire_ctx)
- return crtc->acquire_ctx;
-
- WARN_ON(!crtc->dev->mode_config.acquire_ctx);
-
- return crtc->dev->mode_config.acquire_ctx;
-}
-EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
-
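For reference, the change at every legacy ioctl call site above and below is a one-line substitution; a minimal before/after sketch, assuming the caller runs under drm_modeset_lock_all() so that dev->mode_config.acquire_ctx is populated:

/* before: per-CRTC or global context picked by the removed helper */
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
/* after: the global context installed by drm_modeset_lock_all() */
state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
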
/**
* drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
* @dev: device
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
- state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
retry:
plane_state = drm_atomic_get_plane_state(state, primary);
return;
}
- state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+ state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
retry:
crtc_state = drm_atomic_get_crtc_state(state, crtc);
goto unlock;
}
- state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
+ state->acquire_ctx = crtc->base.dev->mode_config.acquire_ctx;
pipe_config = intel_atomic_get_crtc_state(state, crtc);
if (IS_ERR(pipe_config)) {
ret = PTR_ERR(pipe_config);
*/
spinlock_t commit_lock;
- /**
- * @acquire_ctx:
- *
- * Per-CRTC implicit acquire context used by atomic drivers for legacy
- * IOCTLs, so that atomic drivers can get at the locking acquire
- * context.
- */
- struct drm_modeset_acquire_ctx *acquire_ctx;
-
#ifdef CONFIG_DEBUG_FS
/**
* @debugfs_entry:
void drm_modeset_lock_all(struct drm_device *dev);
void drm_modeset_unlock_all(struct drm_device *dev);
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
-struct drm_modeset_acquire_ctx *
-drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc);
int drm_modeset_lock_all_ctx(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
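
For code paths that do not go through drm_modeset_lock_all(), the declaration above is the explicit-context alternative. A generic sketch of that pattern, not taken from this patch (the label name and the elided body are illustrative):

struct drm_modeset_acquire_ctx ctx;
int ret;

drm_modeset_acquire_init(&ctx, 0);
retry:
ret = drm_modeset_lock_all_ctx(dev, &ctx);
if (ret == -EDEADLK) {
	/* lock contention: back off and retry with the same context */
	drm_modeset_backoff(&ctx);
	goto retry;
}
/* other errors are left to the caller in this sketch */

/* ... inspect or modify modeset state while the locks are held ... */

drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);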