#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
-#include "msm_gpu.h" /* temporary */
#include "msm_fence.h"
struct msm_commit {
struct drm_device *dev;
struct drm_atomic_state *state;
- uint32_t fence;
- struct msm_fence_cb fence_cb;
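+ /* the async part of the commit runs from this work item: */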
+ struct work_struct work;
uint32_t crtc_mask;
};
-static void fence_cb(struct msm_fence_cb *cb);
+static void commit_worker(struct work_struct *work);
/* block until specified crtcs are no longer pending update, and
* atomically mark them as pending update
c->dev = state->dev;
c->state = state;
- /* TODO we might need a way to indicate to run the cb on a
- * different wq so wait_for_vblanks() doesn't block retiring
- * bo's..
- */
- INIT_FENCE_CB(&c->fence_cb, fence_cb);
+ INIT_WORK(&c->work, commit_worker);
return c;
}
}
}
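+/* wait (up to 1s) for rendering to any incoming fbs to complete: */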
+static void wait_fences(struct msm_commit *c, bool async)
+{
+ int nplanes = c->dev->mode_config.num_total_plane;
+ ktime_t timeout = ktime_add_ms(ktime_get(), 1000);
+ int i;
+
+ for (i = 0; i < nplanes; i++) {
+ struct drm_plane *plane = c->state->planes[i];
+ struct drm_plane_state *new_state = c->state->plane_states[i];
+
+ if (!plane)
+ continue;
+
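+ /* only sync against fbs that are actually changing in this update */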
+ if ((plane->state->fb != new_state->fb) && new_state->fb) {
+ struct drm_gem_object *obj =
+ msm_framebuffer_bo(new_state->fb, 0);
+ msm_gem_cpu_sync(obj, MSM_PREP_READ, &timeout);
+ }
+ }
+}
+
/* The (potentially) asynchronous part of the commit. At this point
* nothing can fail short of armageddon.
*/
-static void complete_commit(struct msm_commit *c)
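+/* 'async' indicates we were called from commit_worker() on the atomic wq */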
+static void complete_commit(struct msm_commit *c, bool async)
{
struct drm_atomic_state *state = c->state;
struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
+ wait_fences(c, async);
+
kms->funcs->prepare_commit(kms, state);
drm_atomic_helper_commit_modeset_disables(dev, state);
commit_destroy(c);
}
-static void fence_cb(struct msm_fence_cb *cb)
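+/* runs on priv->atomic_wq for nonblocking commits */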
+static void commit_worker(struct work_struct *work)
{
- struct msm_commit *c =
- container_of(cb, struct msm_commit, fence_cb);
- complete_commit(c);
-}
-
-static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
-{
- struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
- c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+ complete_commit(container_of(work, struct msm_commit, work), true);
}
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock)
{
struct msm_drm_private *priv = dev->dev_private;
- int nplanes = dev->mode_config.num_total_plane;
int ncrtcs = dev->mode_config.num_crtc;
- ktime_t timeout;
struct msm_commit *c;
int i, ret;
c->crtc_mask |= (1 << drm_crtc_index(crtc));
}
- /*
- * Figure out what fence to wait for:
- */
- for (i = 0; i < nplanes; i++) {
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *new_state = state->plane_states[i];
-
- if (!plane)
- continue;
-
- if ((plane->state->fb != new_state->fb) && new_state->fb)
- add_fb(c, new_state->fb);
- }
-
/*
* Wait for pending updates on any of the same crtc's and then
* mark our set of crtc's as busy:
* current layout.
*/
- if (nonblock && priv->gpu) {
- msm_queue_fence_cb(priv->gpu->fctx, &c->fence_cb, c->fence);
+ if (nonblock) {
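+ /* defer the rest of the commit to the ordered atomic wq */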
+ queue_work(priv->atomic_wq, &c->work);
return 0;
}
- timeout = ktime_add_ms(ktime_get(), 1000);
-
- /* uninterruptible wait */
- if (priv->gpu)
- msm_wait_fence(priv->gpu->fctx, c->fence, &timeout, false);
-
- complete_commit(c);
+ complete_commit(c, false);
return 0;
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
+ flush_workqueue(priv->atomic_wq);
+ destroy_workqueue(priv->atomic_wq);
+
if (kms) {
pm_runtime_disable(dev->dev);
kms->funcs->destroy(kms);
dev->dev_private = priv;
priv->wq = alloc_ordered_workqueue("msm", 0);
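+ /* run nonblocking commits on their own wq, so wait_for_vblanks() in the
+  * commit path doesn't block retiring bo's on priv->wq:
+  */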
+ priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
init_waitqueue_head(&priv->pending_crtcs_event);
INIT_LIST_HEAD(&priv->inactive_list);
struct list_head inactive_list;
struct workqueue_struct *wq;
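+ /* wq for nonblocking atomic commits: */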
+ struct workqueue_struct *atomic_wq;
/* crtcs pending async atomic updates: */
uint32_t pending_crtcs;
void msm_gem_move_to_active(struct drm_gem_object *obj,
struct msm_gpu *gpu, bool write, uint32_t fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
-int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
- ktime_t *timeout);
+int msm_gem_cpu_sync(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
fctx->dev = dev;
fctx->name = name;
init_waitqueue_head(&fctx->event);
- INIT_LIST_HEAD(&fctx->fence_cbs);
return fctx;
}
return ret;
}
-int msm_queue_fence_cb(struct msm_fence_context *fctx,
- struct msm_fence_cb *cb, uint32_t fence)
-{
- struct msm_drm_private *priv = fctx->dev->dev_private;
- int ret = 0;
-
- mutex_lock(&fctx->dev->struct_mutex);
- if (!list_empty(&cb->work.entry)) {
- ret = -EINVAL;
- } else if (fence > fctx->completed_fence) {
- cb->fence = fence;
- list_add_tail(&cb->work.entry, &fctx->fence_cbs);
- } else {
- queue_work(priv->wq, &cb->work);
- }
- mutex_unlock(&fctx->dev->struct_mutex);
-
- return ret;
-}
-
/* called from workqueue */
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
{
- struct msm_drm_private *priv = fctx->dev->dev_private;
-
mutex_lock(&fctx->dev->struct_mutex);
fctx->completed_fence = max(fence, fctx->completed_fence);
-
- while (!list_empty(&fctx->fence_cbs)) {
- struct msm_fence_cb *cb;
-
- cb = list_first_entry(&fctx->fence_cbs,
- struct msm_fence_cb, work.entry);
-
- if (cb->fence > fctx->completed_fence)
- break;
-
- list_del_init(&cb->work.entry);
- queue_work(priv->wq, &cb->work);
- }
-
mutex_unlock(&fctx->dev->struct_mutex);
wake_up_all(&fctx->event);
}
-
-void __msm_fence_worker(struct work_struct *work)
-{
- struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
- cb->func(cb);
-}
uint32_t last_fence; /* last assigned fence */
uint32_t completed_fence; /* last completed fence */
wait_queue_head_t event;
- /* callbacks deferred until bo is inactive: */
- struct list_head fence_cbs;
};
struct msm_fence_context * msm_fence_context_alloc(struct drm_device *dev,
const char *name);
void msm_fence_context_free(struct msm_fence_context *fctx);
-/* callback from wq once fence has passed: */
-struct msm_fence_cb {
- struct work_struct work;
- uint32_t fence;
- void (*func)(struct msm_fence_cb *cb);
-};
-
-void __msm_fence_worker(struct work_struct *work);
-
-#define INIT_FENCE_CB(_cb, _func) do { \
- INIT_WORK(&(_cb)->work, __msm_fence_worker); \
- (_cb)->func = _func; \
- } while (0)
-
int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
ktime_t *timeout, bool interruptible);
-int msm_queue_fence_cb(struct msm_fence_context *fctx,
-		struct msm_fence_cb *cb, uint32_t fence);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}
-int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
+int msm_gem_cpu_sync(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
ret = msm_wait_fence(priv->gpu->fctx, fence, timeout, true);
}
+ return ret;
+}
+
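+/* like msm_gem_cpu_sync(), plus whatever cache maintenance ends up being needed: */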
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
+{
+ int ret = msm_gem_cpu_sync(obj, op, timeout);
+
/* TODO cache maintenance */
return ret;