		drm_fbdev_cma_hotplug_event(vc4->fbdev);
}

-struct vc4_commit {
-	struct drm_device *dev;
-	struct drm_atomic_state *state;
-	struct vc4_seqno_cb cb;
-};
-
static void
-vc4_atomic_complete_commit(struct vc4_commit *c)
+vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
-	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
-
-	kfree(c);
-}
-
-static void
-vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
-{
-	struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);
-
-	vc4_atomic_complete_commit(c);
}
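Editorial aside, not part of the patch: the drm_atomic_state_put() above drops the reference that vc4_atomic_commit() takes with drm_atomic_state_get() before handing the state off, and the up() releases the async_modeset semaphore acquired with down_interruptible() when the commit started. A minimal sketch of that acquire/release pairing, using hypothetical my_commit()/my_complete() names:

	static int my_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
	{
		int ret = down_interruptible(&vc4->async_modeset);

		if (ret)
			return ret;		/* interrupted by a signal */

		drm_atomic_state_get(state);	/* reference for the completion path */
		/* ... hand the state to my_complete(), inline or via a worker ... */
		return 0;
	}

	static void my_complete(struct vc4_dev *vc4, struct drm_atomic_state *state)
	{
		drm_atomic_state_put(state);	/* drop the commit-path reference */
		up(&vc4->async_modeset);	/* let the next commit proceed */
	}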
-static struct vc4_commit *commit_init(struct drm_atomic_state *state)
+static void commit_work(struct work_struct *work)
{
-	struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
-
-	if (!c)
-		return NULL;
-	c->dev = state->dev;
-	c->state = state;
-
-	return c;
+	struct drm_atomic_state *state = container_of(work,
+						      struct drm_atomic_state,
+						      commit_work);
+	vc4_atomic_complete_commit(state);
}
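For readers unfamiliar with the idiom: commit_work() recovers the drm_atomic_state from the work_struct embedded inside it, which is exactly what container_of() does. A reduced, self-contained sketch of the same pattern (example_state, example_worker and example_queue are hypothetical names, not from the patch):

	#include <linux/printk.h>
	#include <linux/workqueue.h>

	struct example_state {
		struct work_struct commit_work;	/* embedded, like drm_atomic_state's */
		int payload;
	};

	static void example_worker(struct work_struct *work)
	{
		/* Step from the embedded member back to its containing struct. */
		struct example_state *state =
			container_of(work, struct example_state, commit_work);

		pr_info("payload = %d\n", state->payload);
	}

	static void example_queue(struct example_state *state)
	{
		/* Mirrors the patch: bind the handler, then defer to a workqueue. */
		INIT_WORK(&state->commit_work, example_worker);
		queue_work(system_unbound_wq, &state->commit_work);
	}

system_unbound_wq is a reasonable home for this kind of work: commit work can block for a while (waiting on vblanks and fences) and gains nothing from being bound to the submitting CPU.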
/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a with drm_atomic_helper_check() pre-validated state
 * object. This can still fail when e.g. the framebuffer reservation fails. For
 * now this doesn't implement asynchronous commits.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;
-	int i;
-	uint64_t wait_seqno = 0;
-	struct vc4_commit *c;
-	struct drm_plane *plane;
-	struct drm_plane_state *new_state;
-
-	c = commit_init(state);
-	if (!c)
-		return -ENOMEM;

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;
+	INIT_WORK(&state->commit_work, commit_work);
+
	ret = down_interruptible(&vc4->async_modeset);
-	if (ret) {
-		kfree(c);
+	if (ret)
		return ret;
-	}

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
-		kfree(c);
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
-			kfree(c);
			up(&vc4->async_modeset);
			return ret;
		}
	}

-	for_each_plane_in_state(state, plane, new_state, i) {
-		if ((plane->state->fb != new_state->fb) && new_state->fb) {
-			struct drm_gem_cma_object *cma_bo =
-				drm_fb_cma_get_gem_obj(new_state->fb, 0);
-			struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
-
-			wait_seqno = max(bo->seqno, wait_seqno);
-		}
-	}
-
	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_state_get(state);
-	if (nonblock) {
-		vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
-				   vc4_atomic_complete_commit_seqno_cb);
-	} else {
-		vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
-		vc4_atomic_complete_commit(c);
-	}
+	if (nonblock)
+		queue_work(system_unbound_wq, &state->commit_work);
+	else
+		vc4_atomic_complete_commit(state);

	return 0;
}
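A closing observation (editorial summary, not from the patch text): every error path in vc4_atomic_commit() unwinds exactly what has been acquired so far, in reverse order, and everything held past the point of no return is released in vc4_atomic_complete_commit(), which the blocking and nonblocking paths share. Condensed:

	/*
	 * setup_commit() fails        -> return            (nothing held yet)
	 * down_interruptible() fails  -> return            (semaphore not taken)
	 * prepare_planes() fails      -> up(); return
	 * wait_for_fences() fails     -> cleanup_planes(); up(); return
	 *
	 * After drm_atomic_state_get(): no more failure paths; the state
	 * reference and the semaphore are released in
	 * vc4_atomic_complete_commit(), either inline (blocking) or from
	 * commit_work() on system_unbound_wq (nonblocking).
	 */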