drm/amdgpu: add the interface of waiting multiple fences (v4)
author Junwei Zhang <Jerry.Zhang@amd.com>
Fri, 4 Nov 2016 20:16:10 +0000 (16:16 -0400)
committer Sumit Semwal <sumit.semwal@linaro.org>
Tue, 8 Nov 2016 18:58:42 +0000 (00:28 +0530)
v2: agd: rebase and squash in all the previous optimizations and
changes so everything compiles.
v3: squash in Slava's 32bit build fix
v4: rebase on drm-next (fence -> dma_fence),
    squash in Monk's ioctl update patch

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
 [sumits: fix checkpatch warnings]
Link: http://patchwork.freedesktop.org/patch/msgid/1478290570-30982-2-git-send-email-alexander.deucher@amd.com
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
include/uapi/drm/amdgpu_drm.h
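
For context, a minimal (hypothetical) userspace sketch of driving the new ioctl follows; it is not part of the patch. The context id and sequence numbers are placeholders, the header include path depends on how the uapi headers are installed, and the timeout handling assumes amdgpu_gem_timeout() (used by the handlers below) interprets timeout_ns as an absolute CLOCK_MONOTONIC deadline. A real caller would typically go through libdrm's drmIoctl() so the call is retried on EINTR/EAGAIN.

/*
 * Hypothetical caller: wait until two previously submitted command
 * streams on the GFX ring have both signaled, with a 1 second timeout.
 */
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>	/* adjust to wherever the uapi header is installed */

static int wait_for_both(int drm_fd, uint32_t ctx_id,
			 uint64_t seq0, uint64_t seq1)
{
	struct drm_amdgpu_fence fences[2];
	union drm_amdgpu_wait_fences args;
	struct timespec now;

	memset(fences, 0, sizeof(fences));
	fences[0].ctx_id = ctx_id;
	fences[0].ip_type = AMDGPU_HW_IP_GFX;	/* ip_instance and ring stay 0 */
	fences[0].seq_no = seq0;
	fences[1] = fences[0];
	fences[1].seq_no = seq1;

	/* timeout_ns is an absolute CLOCK_MONOTONIC deadline, so add 1s to "now" */
	clock_gettime(CLOCK_MONOTONIC, &now);

	memset(&args, 0, sizeof(args));
	args.in.fences = (uint64_t)(uintptr_t)fences;	/* user pointer passed as __u64 */
	args.in.fence_count = 2;
	args.in.wait_all = 1;				/* 0 would mean "wait for any" */
	args.in.timeout_ns = (uint64_t)now.tv_sec * 1000000000ull +
			     now.tv_nsec + 1000000000ull;

	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args))
		return -1;

	/* out.status is non-zero when the fences signaled before the deadline;
	 * with wait_all == 0, out.first_signaled reports which fence fired. */
	return args.out.status ? 0 : -1;
}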

drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2ec7b3baeec20c380eb2e8a49fb713790eb40176..c2b8496cdf632160d7e55ae521ba83edb8e2722e 100644
@@ -1212,6 +1212,8 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp);
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *filp);
 
 int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a024217896fdb0babbc28dc9d84584a48d010815..b4bc83aa5999400c32dfcf878e7c4804d3bf4ee6 100644
@@ -1139,6 +1139,180 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+/**
+ * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @user: drm_amdgpu_fence copied from user space
+ */
+static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
+                                            struct drm_file *filp,
+                                            struct drm_amdgpu_fence *user)
+{
+       struct amdgpu_ring *ring;
+       struct amdgpu_ctx *ctx;
+       struct dma_fence *fence;
+       int r;
+
+       r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
+                              user->ring, &ring);
+       if (r)
+               return ERR_PTR(r);
+
+       ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
+       if (ctx == NULL)
+               return ERR_PTR(-EINVAL);
+
+       fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
+       amdgpu_ctx_put(ctx);
+
+       return fence;
+}
+
+/**
+ * amdgpu_cs_wait_all_fences - wait on all fences to signal
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @wait: wait parameters
+ * @fences: array of drm_amdgpu_fence
+ */
+static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
+                                    struct drm_file *filp,
+                                    union drm_amdgpu_wait_fences *wait,
+                                    struct drm_amdgpu_fence *fences)
+{
+       uint32_t fence_count = wait->in.fence_count;
+       unsigned int i;
+       long r = 1;
+
+       for (i = 0; i < fence_count; i++) {
+               struct dma_fence *fence;
+               unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
+
+               fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
+               if (IS_ERR(fence))
+                       return PTR_ERR(fence);
+               else if (!fence)
+                       continue;
+
+               r = dma_fence_wait_timeout(fence, true, timeout);
+               if (r < 0)
+                       return r;
+
+               if (r == 0)
+                       break;
+       }
+
+       memset(wait, 0, sizeof(*wait));
+       wait->out.status = (r > 0);
+
+       return 0;
+}
+
+/**
+ * amdgpu_cs_wait_any_fence - wait on any fence to signal
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @wait: wait parameters
+ * @fences: array of drm_amdgpu_fence
+ */
+static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
+                                   struct drm_file *filp,
+                                   union drm_amdgpu_wait_fences *wait,
+                                   struct drm_amdgpu_fence *fences)
+{
+       unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
+       uint32_t fence_count = wait->in.fence_count;
+       uint32_t first = ~0;
+       struct dma_fence **array;
+       unsigned int i;
+       long r;
+
+       /* Prepare the fence array */
+       array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
+
+       if (array == NULL)
+               return -ENOMEM;
+
+       for (i = 0; i < fence_count; i++) {
+               struct dma_fence *fence;
+
+               fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
+               if (IS_ERR(fence)) {
+                       r = PTR_ERR(fence);
+                       goto err_free_fence_array;
+               } else if (fence) {
+                       array[i] = fence;
+               } else { /* NULL, the fence has been already signaled */
+                       r = 1;
+                       goto out;
+               }
+       }
+
+       r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
+                                      &first);
+       if (r < 0)
+               goto err_free_fence_array;
+
+out:
+       memset(wait, 0, sizeof(*wait));
+       wait->out.status = (r > 0);
+       wait->out.first_signaled = first;
+       /* set return value 0 to indicate success */
+       r = 0;
+
+err_free_fence_array:
+       for (i = 0; i < fence_count; i++)
+               dma_fence_put(array[i]);
+       kfree(array);
+
+       return r;
+}
+
+/**
+ * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
+ *
+ * @dev: drm device
+ * @data: data from userspace
+ * @filp: file private
+ */
+int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *filp)
+{
+       struct amdgpu_device *adev = dev->dev_private;
+       union drm_amdgpu_wait_fences *wait = data;
+       uint32_t fence_count = wait->in.fence_count;
+       struct drm_amdgpu_fence *fences_user;
+       struct drm_amdgpu_fence *fences;
+       int r;
+
+       /* Get the fences from userspace */
+       fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
+                       GFP_KERNEL);
+       if (fences == NULL)
+               return -ENOMEM;
+
+       fences_user = (void __user *)(unsigned long)(wait->in.fences);
+       if (copy_from_user(fences, fences_user,
+               sizeof(struct drm_amdgpu_fence) * fence_count)) {
+               r = -EFAULT;
+               goto err_free_fences;
+       }
+
+       if (wait->in.wait_all)
+               r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
+       else
+               r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
+
+err_free_fences:
+       kfree(fences);
+
+       return r;
+}
+
 /**
  * amdgpu_cs_find_bo_va - find bo_va for VM address
  *
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 78392671046a06959119fda41542be2e1729e169..06e145b5afd79d5ce0eb51ec185e4673a5377813 100644
@@ -825,6 +825,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
        DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index fd26c4b8d793efdb0b030f6df5248556804b7cf4..34a795463988bf16f57712d3a96d99afcabd87b7 100644
@@ -361,7 +361,8 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
                if (count) {
                        spin_unlock(&sa_manager->wq.lock);
                        t = dma_fence_wait_any_timeout(fences, count, false,
-                                                      MAX_SCHEDULE_TIMEOUT);
+                                                      MAX_SCHEDULE_TIMEOUT,
+                                                      NULL);
                        for (i = 0; i < count; ++i)
                                dma_fence_put(fences[i]);
 
include/uapi/drm/amdgpu_drm.h
index 4684f378f046e2cda38d655d50e3899130987a03..2191a9e4f3db27182b79f38ac46456b9e2c25118 100644
@@ -50,6 +50,7 @@ extern "C" {
 #define DRM_AMDGPU_WAIT_CS             0x09
 #define DRM_AMDGPU_GEM_OP              0x10
 #define DRM_AMDGPU_GEM_USERPTR         0x11
+#define DRM_AMDGPU_WAIT_FENCES         0x12
 
 #define DRM_IOCTL_AMDGPU_GEM_CREATE    DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
 #define DRM_IOCTL_AMDGPU_GEM_MMAP      DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -63,6 +64,7 @@ extern "C" {
 #define DRM_IOCTL_AMDGPU_WAIT_CS       DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
 #define DRM_IOCTL_AMDGPU_GEM_OP                DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
 #define DRM_IOCTL_AMDGPU_GEM_USERPTR   DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
+#define DRM_IOCTL_AMDGPU_WAIT_FENCES   DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
 
 #define AMDGPU_GEM_DOMAIN_CPU          0x1
 #define AMDGPU_GEM_DOMAIN_GTT          0x2
@@ -307,6 +309,32 @@ union drm_amdgpu_wait_cs {
        struct drm_amdgpu_wait_cs_out out;
 };
 
+struct drm_amdgpu_fence {
+       __u32 ctx_id;
+       __u32 ip_type;
+       __u32 ip_instance;
+       __u32 ring;
+       __u64 seq_no;
+};
+
+struct drm_amdgpu_wait_fences_in {
+       /** Userspace pointer to an array of struct drm_amdgpu_fence */
+       __u64 fences;
+       __u32 fence_count;
+       __u32 wait_all;
+       __u64 timeout_ns;
+};
+
+struct drm_amdgpu_wait_fences_out {
+       __u32 status;
+       __u32 first_signaled;
+};
+
+union drm_amdgpu_wait_fences {
+       struct drm_amdgpu_wait_fences_in in;
+       struct drm_amdgpu_wait_fences_out out;
+};
+
 #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO      0
 #define AMDGPU_GEM_OP_SET_PLACEMENT            1