gpu->rb->cur = gpu->rb->start;
/* reset completed fence seqno, just discard anything pending: */
- adreno_gpu->memptrs->fence = gpu->submitted_fence;
+ adreno_gpu->memptrs->fence = gpu->fctx->last_fence;
adreno_gpu->memptrs->rptr = 0;
adreno_gpu->memptrs->wptr = 0;
adreno_gpu->rev.patchid);
seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->submitted_fence);
+ gpu->fctx->last_fence);
seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu));
seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
adreno_gpu->rev.patchid);
printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->submitted_fence);
+ gpu->fctx->last_fence);
printk("rptr: %d\n", get_rptr(adreno_gpu));
printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
printk("rb wptr: %d\n", get_wptr(gpu->rb));
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
+#include "msm_gpu.h" /* temporary */
#include "msm_fence.h"
struct msm_commit {
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock)
{
+ struct msm_drm_private *priv = dev->dev_private;
int nplanes = dev->mode_config.num_total_plane;
int ncrtcs = dev->mode_config.num_crtc;
ktime_t timeout;
* current layout.
*/
- if (nonblock) {
- msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
+ if (nonblock && priv->gpu) {
+ msm_queue_fence_cb(priv->gpu->fctx, &c->fence_cb, c->fence);
return 0;
}
timeout = ktime_add_ms(ktime_get(), 1000);
/* uninterruptible wait */
- msm_wait_fence(dev, c->fence, &timeout, false);
+ if (priv->gpu)
+ msm_wait_fence(priv->gpu->fctx, c->fence, &timeout, false);
complete_commit(c);
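For context, the nonblock path above works because struct msm_commit embeds the msm_fence_cb passed as &c->fence_cb: once the fence signals, the callback runs from the driver workqueue and finishes the commit. A minimal sketch of such a callback, assuming only the fence_cb member implied by the call above:

	/* sketch: runs from priv->wq once c->fence has completed */
	static void fence_cb(struct msm_fence_cb *cb)
	{
		struct msm_commit *c =
				container_of(cb, struct msm_commit, fence_cb);
		complete_commit(c);
	}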
dev->dev_private = priv;
priv->wq = alloc_ordered_workqueue("msm", 0);
- init_waitqueue_head(&priv->fence_event);
init_waitqueue_head(&priv->pending_crtcs_event);
INIT_LIST_HEAD(&priv->inactive_list);
- INIT_LIST_HEAD(&priv->fence_cbs);
INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
spin_lock_init(&priv->vblank_ctrl.lock);
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_wait_fence *args = data;
ktime_t timeout = to_ktime(args->timeout);
return -EINVAL;
}
- return msm_wait_fence(dev, args->fence, &timeout, true);
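+ /* no gpu means no fence seqnos were ever emitted, so nothing to wait on: */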
+ if (!priv->gpu)
+ return 0;
+
+ return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
}
static const struct drm_ioctl_desc msm_ioctls[] = {
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
+struct msm_fence_context;
struct msm_fence_cb;
#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
struct drm_fb_helper *fbdev;
- uint32_t next_fence, completed_fence;
- wait_queue_head_t fence_event;
-
struct msm_rd_state *rd;
struct msm_perf_state *perf;
struct workqueue_struct *wq;
- /* callbacks deferred until bo is inactive: */
- struct list_head fence_cbs;
-
/* crtcs pending async atomic updates: */
uint32_t pending_crtcs;
wait_queue_head_t pending_crtcs_event;
void msm_gem_prime_unpin(struct drm_gem_object *obj);
void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_vaddr(struct drm_gem_object *obj);
-int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
- struct msm_fence_cb *cb);
void msm_gem_move_to_active(struct drm_gem_object *obj,
struct msm_gpu *gpu, bool write, uint32_t fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/fence.h>
+
#include "msm_drv.h"
#include "msm_fence.h"
-#include "msm_gpu.h"
-static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
+
+struct msm_fence_context *
+msm_fence_context_alloc(struct drm_device *dev, const char *name)
{
- struct msm_drm_private *priv = dev->dev_private;
- return (int32_t)(priv->completed_fence - fence) >= 0;
+ struct msm_fence_context *fctx;
+
+ fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return ERR_PTR(-ENOMEM);
+
+ fctx->dev = dev;
+ fctx->name = name;
+ init_waitqueue_head(&fctx->event);
+ INIT_LIST_HEAD(&fctx->fence_cbs);
+
+ return fctx;
}
-int msm_wait_fence(struct drm_device *dev, uint32_t fence,
- ktime_t *timeout , bool interruptible)
+void msm_fence_context_free(struct msm_fence_context *fctx)
{
- struct msm_drm_private *priv = dev->dev_private;
- int ret;
+ kfree(fctx);
+}
+
+static inline bool fence_completed(struct msm_fence_context *fctx,
+ uint32_t fence)
+{
+ return (int32_t)(fctx->completed_fence - fence) >= 0;
+}
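The (int32_t) cast above is the usual seqno-wraparound trick: fence numbers are 32-bit and monotonically increasing, so comparing the signed difference stays correct across wraparound as long as the two seqnos are within 2^31 of each other, where a plain unsigned compare would not. A standalone illustration (not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	static void seqno_wraparound_demo(void)
	{
		uint32_t completed = 0x00000002; /* counter wrapped past 0xffffffff */
		uint32_t fence     = 0xfffffffe; /* assigned just before the wrap */

		/* a naive unsigned compare wrongly reports the fence as pending: */
		assert(!(completed >= fence));

		/* signed delta: 0x2 - 0xfffffffe == 4 (mod 2^32), so it is done: */
		assert((int32_t)(completed - fence) >= 0);
	}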
- if (!priv->gpu)
- return 0;
+int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
+ ktime_t *timeout, bool interruptible)
+{
+ int ret;
- if (fence > priv->gpu->submitted_fence) {
- DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
- fence, priv->gpu->submitted_fence);
+ if (fence > fctx->last_fence) {
+ DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
+ fctx->name, fence, fctx->last_fence);
return -EINVAL;
}
if (!timeout) {
/* no-wait: */
- ret = fence_completed(dev, fence) ? 0 : -EBUSY;
+ ret = fence_completed(fctx, fence) ? 0 : -EBUSY;
} else {
unsigned long remaining_jiffies = timeout_to_jiffies(timeout);
if (interruptible)
- ret = wait_event_interruptible_timeout(priv->fence_event,
- fence_completed(dev, fence),
+ ret = wait_event_interruptible_timeout(fctx->event,
+ fence_completed(fctx, fence),
remaining_jiffies);
else
- ret = wait_event_timeout(priv->fence_event,
- fence_completed(dev, fence),
+ ret = wait_event_timeout(fctx->event,
+ fence_completed(fctx, fence),
remaining_jiffies);
if (ret == 0) {
DBG("timeout waiting for fence: %u (completed: %u)",
- fence, priv->completed_fence);
+ fence, fctx->completed_fence);
ret = -ETIMEDOUT;
} else if (ret != -ERESTARTSYS) {
ret = 0;
return ret;
}
-int msm_queue_fence_cb(struct drm_device *dev,
+int msm_queue_fence_cb(struct msm_fence_context *fctx,
struct msm_fence_cb *cb, uint32_t fence)
{
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_private *priv = fctx->dev->dev_private;
int ret = 0;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&fctx->dev->struct_mutex);
if (!list_empty(&cb->work.entry)) {
ret = -EINVAL;
- } else if (fence > priv->completed_fence) {
+ } else if (fence > fctx->completed_fence) {
cb->fence = fence;
- list_add_tail(&cb->work.entry, &priv->fence_cbs);
+ list_add_tail(&cb->work.entry, &fctx->fence_cbs);
} else {
queue_work(priv->wq, &cb->work);
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&fctx->dev->struct_mutex);
return ret;
}
/* called from workqueue */
-void msm_update_fence(struct drm_device *dev, uint32_t fence)
+void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
{
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_private *priv = fctx->dev->dev_private;
- mutex_lock(&dev->struct_mutex);
- priv->completed_fence = max(fence, priv->completed_fence);
+ mutex_lock(&fctx->dev->struct_mutex);
+ fctx->completed_fence = max(fence, fctx->completed_fence);
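+ /* cbs are queued in fence order, so stop at the first still-pending one: */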
- while (!list_empty(&priv->fence_cbs)) {
+ while (!list_empty(&fctx->fence_cbs)) {
struct msm_fence_cb *cb;
- cb = list_first_entry(&priv->fence_cbs,
+ cb = list_first_entry(&fctx->fence_cbs,
struct msm_fence_cb, work.entry);
- if (cb->fence > priv->completed_fence)
+ if (cb->fence > fctx->completed_fence)
break;
list_del_init(&cb->work.entry);
queue_work(priv->wq, &cb->work);
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&fctx->dev->struct_mutex);
- wake_up_all(&priv->fence_event);
+ wake_up_all(&fctx->event);
}
void __msm_fence_worker(struct work_struct *work)
#include "msm_drv.h"
+struct msm_fence_context {
+ struct drm_device *dev;
+ const char *name;
+ /* last_fence == completed_fence --> no pending work */
+ uint32_t last_fence; /* last assigned fence */
+ uint32_t completed_fence; /* last completed fence */
+ wait_queue_head_t event;
+ /* callbacks deferred until bo is inactive: */
+ struct list_head fence_cbs;
+};
+
+struct msm_fence_context *msm_fence_context_alloc(struct drm_device *dev,
+ const char *name);
+void msm_fence_context_free(struct msm_fence_context *fctx);
+
/* callback from wq once fence has passed: */
struct msm_fence_cb {
struct work_struct work;
(_cb)->func = _func; \
} while (0)
-int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
ktime_t *timeout, bool interruptible);
-int msm_queue_fence_cb(struct drm_device *dev,
+int msm_queue_fence_cb(struct msm_fence_context *fctx,
struct msm_fence_cb *cb, uint32_t fence);
-void msm_update_fence(struct drm_device *dev, uint32_t fence);
+void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
#endif
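Taken together, the header gives each ring a small fencing lifecycle: allocate a context, hand out seqnos from last_fence at submit time, and let the retire/irq path call msm_update_fence() to advance completed_fence, wake waiters, and run any deferred callbacks. An end-to-end sketch under stated assumptions (fence_lifecycle_sketch, my_cb_func, and the caller-provided cb are hypothetical; INIT_FENCE_CB() is the callback initializer whose tail is visible above):

	static int fence_lifecycle_sketch(struct drm_device *drm_dev,
			struct msm_fence_cb *cb)
	{
		struct msm_fence_context *fctx;
		uint32_t seqno;

		fctx = msm_fence_context_alloc(drm_dev, "sketch");
		if (IS_ERR(fctx))
			return PTR_ERR(fctx);

		/* assign a seqno, as the submit path does: */
		seqno = ++fctx->last_fence;

		/* defer my_cb_func() until seqno completes: */
		INIT_FENCE_CB(cb, my_cb_func);
		msm_queue_fence_cb(fctx, cb, seqno);

		/* ...later, from the retire path: */
		msm_update_fence(fctx, seqno); /* wakes waiters, queues the cb */

		return 0;
	}

Locking is elided in the sketch; in the driver both the seqno assignment and the callback list are protected by dev->struct_mutex.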
return ret;
}
-/* setup callback for when bo is no longer busy..
- * TODO probably want to differentiate read vs write..
- */
-int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
- struct msm_fence_cb *cb)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- uint32_t fence = msm_gem_fence(msm_obj,
- MSM_PREP_READ | MSM_PREP_WRITE);
- return msm_queue_fence_cb(obj->dev, cb, fence);
-}
-
void msm_gem_move_to_active(struct drm_gem_object *obj,
struct msm_gpu *gpu, bool write, uint32_t fence)
{
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
if (op & MSM_PREP_NOSYNC)
timeout = NULL;
- ret = msm_wait_fence(dev, fence, timeout, true);
+ if (priv->gpu)
+ ret = msm_wait_fence(priv->gpu->fctx, fence, timeout, true);
}
/* TODO cache maintenance */
if (fence != gpu->hangcheck_fence) {
/* some progress has been made.. ya! */
gpu->hangcheck_fence = fence;
- } else if (fence < gpu->submitted_fence) {
+ } else if (fence < gpu->fctx->last_fence) {
/* no progress and not done.. hung! */
gpu->hangcheck_fence = fence;
dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
dev_err(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
dev_err(dev->dev, "%s: submitted fence: %u\n",
- gpu->name, gpu->submitted_fence);
+ gpu->name, gpu->fctx->last_fence);
queue_work(priv->wq, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
- if (gpu->submitted_fence > gpu->hangcheck_fence)
+ if (gpu->fctx->last_fence > gpu->hangcheck_fence)
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
struct drm_device *dev = gpu->dev;
uint32_t fence = gpu->funcs->last_fence(gpu);
- msm_update_fence(gpu->dev, fence);
+ msm_update_fence(gpu->fctx, fence);
mutex_lock(&dev->struct_mutex);
retire_submits(gpu, fence);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- submit->fence = ++priv->next_fence;
-
- gpu->submitted_fence = submit->fence;
+ submit->fence = ++gpu->fctx->last_fence;
inactive_cancel(gpu);
msm_rd_dump_submit(submit);
- gpu->submitted_fence = submit->fence;
-
update_sw_cntrs(gpu);
for (i = 0; i < submit->nr_bos; i++) {
gpu->funcs = funcs;
gpu->name = name;
gpu->inactive = true;
+ gpu->fctx = msm_fence_context_alloc(drm, name);
+ if (IS_ERR(gpu->fctx)) {
+ ret = PTR_ERR(gpu->fctx);
+ gpu->fctx = NULL;
+ goto fail;
+ }
INIT_LIST_HEAD(&gpu->active_list);
INIT_WORK(&gpu->retire_work, retire_worker);
if (gpu->mmu)
gpu->mmu->funcs->destroy(gpu->mmu);
+
+ if (gpu->fctx)
+ msm_fence_context_free(gpu->fctx);
}
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
+#include "msm_fence.h"
#include "msm_ringbuffer.h"
struct msm_gem_submit;
const struct msm_gpu_perfcntr *perfcntrs;
uint32_t num_perfcntrs;
+ /* ringbuffer: */
struct msm_ringbuffer *rb;
uint32_t rb_iova;
/* list of GEM active objects: */
struct list_head active_list;
- uint32_t submitted_fence;
+ /* fencing: */
+ struct msm_fence_context *fctx;
/* is gpu powered/active? */
int active_cnt;
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
- return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
+ return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu);
}
/* Perf-Counters: