drm/msm: Add a helper function for in-kernel buffer allocations
author Jordan Crouse <jcrouse@codeaurora.org>
Thu, 27 Jul 2017 16:42:40 +0000 (10:42 -0600)
committer Rob Clark <robdclark@gmail.com>
Tue, 22 Aug 2017 17:19:17 +0000 (13:19 -0400)
Nearly all of the in-kernel buffer allocations allocate a buffer
object, virtual address, and GPU iova at the same time. Add a helper
function to handle the details.

Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
[dropped msm_fbdev conversion to new helper, since it interferes with
display-handover work, where we want to separate allocation and mapping]
Signed-off-by: Rob Clark <robdclark@gmail.com>
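
For illustration, here is a before/after sketch of the call pattern the
helper replaces. It is assembled from the hunks below; the variable
names (drm, size, bo, iova, ptr) are illustrative rather than taken
from any one caller.

    struct drm_gem_object *bo;
    uint64_t iova;
    void *ptr;
    int ret;

    /* Before: each caller open-coded three steps, each with its own
     * error unwinding. */
    bo = msm_gem_new_locked(drm, size, MSM_BO_UNCACHED);
    if (IS_ERR(bo))
            return PTR_ERR(bo);

    ret = msm_gem_get_iova(bo, gpu->aspace, &iova);
    if (ret) {
            drm_gem_object_unreference(bo);
            return ret;
    }

    ptr = msm_gem_get_vaddr(bo);
    if (!ptr) {
            drm_gem_object_unreference(bo);
            return -ENOMEM;
    }

    /* After: one call returns the kernel vaddr (or an ERR_PTR) and
     * fills in the bo and iova out-parameters. */
    ptr = msm_gem_kernel_new_locked(drm, size, MSM_BO_UNCACHED,
            gpu->aspace, &bo, &iova);
    if (IS_ERR(ptr))
            return PTR_ERR(ptr);
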
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_power.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_ringbuffer.c

index 1f4a0322b1279f4318588d347ff408ecbfc9a108..1e164008f5b9d1e4a1055f9f1e0f2d3a9ed4f8c1 100644 (file)
@@ -284,28 +284,14 @@ static int a5xx_me_init(struct msm_gpu *gpu)
 static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
                const struct firmware *fw, u64 *iova)
 {
-       struct drm_device *drm = gpu->dev;
        struct drm_gem_object *bo;
        void *ptr;
 
-       bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED);
-       if (IS_ERR(bo))
-               return bo;
+       ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
+               MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
 
-       ptr = msm_gem_get_vaddr(bo);
-       if (!ptr) {
-               drm_gem_object_unreference(bo);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       if (iova) {
-               int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
-
-               if (ret) {
-                       drm_gem_object_unreference(bo);
-                       return ERR_PTR(ret);
-               }
-       }
+       if (IS_ERR(ptr))
+               return ERR_CAST(ptr);
 
        memcpy(ptr, &fw->data[4], fw->size - 4);
 
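One detail in the hunk above: a5xx_ucode_load_bo() returns a struct
drm_gem_object * while the new helper returns the void * kernel
mapping, so the error path re-types the encoded errno with ERR_CAST()
instead of unwrapping and rewrapping it by hand. Illustrative
equivalent:

    if (IS_ERR(ptr))
            return ERR_CAST(ptr);   /* same as ERR_PTR(PTR_ERR(ptr)) */
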
index 87af6eea0483a01574578655925fa24f0d895591..04aab1dcae2b12e9f52dff5d0de618e85e5f21c9 100644 (file)
@@ -294,16 +294,10 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
         */
        bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
 
-       a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED);
-       if (IS_ERR(a5xx_gpu->gpmu_bo))
-               goto err;
-
-       if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
-                       &a5xx_gpu->gpmu_iova))
-               goto err;
-
-       ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
-       if (!ptr)
+       ptr = msm_gem_kernel_new_locked(drm, bosize,
+               MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
+               &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+       if (IS_ERR(ptr))
                goto err;
 
        while (cmds_size > 0) {
index f69a903c95aff6f51daf84a252c2f281415f7230..c8b4ac254bb5dfcf594769f604015e097bf1b554 100644 (file)
@@ -391,29 +391,17 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                return ret;
        }
 
-       adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
-                       MSM_BO_UNCACHED);
-       if (IS_ERR(adreno_gpu->memptrs_bo)) {
-               ret = PTR_ERR(adreno_gpu->memptrs_bo);
-               adreno_gpu->memptrs_bo = NULL;
-               dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
-               return ret;
-       }
+       adreno_gpu->memptrs = msm_gem_kernel_new(drm,
+               sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
+               &adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
 
-       adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
        if (IS_ERR(adreno_gpu->memptrs)) {
-               dev_err(drm->dev, "could not vmap memptrs\n");
-               return -ENOMEM;
-       }
-
-       ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
-                       &adreno_gpu->memptrs_iova);
-       if (ret) {
-               dev_err(drm->dev, "could not map memptrs: %d\n", ret);
-               return ret;
+               ret = PTR_ERR(adreno_gpu->memptrs);
+               adreno_gpu->memptrs = NULL;
+               dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
        }
 
-       return 0;
+       return ret;
 }
 
 void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
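
A note on the error convention above: memptrs is reset to NULL on
failure so the cleanup path can test it unconditionally. A hedged
sketch of the matching teardown for a buffer created with this helper;
the put/unreference calls are the existing msm_gem API, but the exact
cleanup body is outside this diff:

    if (adreno_gpu->memptrs) {
            /* Undo get_vaddr, drop the iova pin, drop the reference. */
            msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
            msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->aspace);
            drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
    }
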
index fc8d24f7c0841c3f6a3f840b47712a958d6b42c5..62ac0871bf14e4611170ab5663c8c1a4345bbe9c 100644 (file)
@@ -237,6 +237,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
                uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
                uint32_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+               uint32_t flags, struct msm_gem_address_space *aspace,
+               struct drm_gem_object **bo, uint64_t *iova);
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+               uint32_t flags, struct msm_gem_address_space *aspace,
+               struct drm_gem_object **bo, uint64_t *iova);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                struct dma_buf *dmabuf, struct sg_table *sgt);
 
index a0c60e738db8d7be5e841311832e82f0b45bd7fa..f15821a0d90089d1045e810e4b28af552db9eec0 100644 (file)
@@ -1024,3 +1024,49 @@ fail:
        drm_gem_object_unreference_unlocked(obj);
        return ERR_PTR(ret);
 }
+
+static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+               uint32_t flags, struct msm_gem_address_space *aspace,
+               struct drm_gem_object **bo, uint64_t *iova, bool locked)
+{
+       void *vaddr;
+       struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
+       int ret;
+
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
+
+       if (iova) {
+               ret = msm_gem_get_iova(obj, aspace, iova);
+               if (ret) {
+                       drm_gem_object_unreference(obj);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       vaddr = msm_gem_get_vaddr(obj);
+       if (!vaddr) {
+               msm_gem_put_iova(obj, aspace);
+               drm_gem_object_unreference(obj);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       if (bo)
+               *bo = obj;
+
+       return vaddr;
+}
+
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+               uint32_t flags, struct msm_gem_address_space *aspace,
+               struct drm_gem_object **bo, uint64_t *iova)
+{
+       return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
+}
+
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+               uint32_t flags, struct msm_gem_address_space *aspace,
+               struct drm_gem_object **bo, uint64_t *iova)
+{
+       return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
+}
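
On the two wrappers above: they mirror the existing msm_gem_new() /
msm_gem_new_locked() split where, by assumption carried over from those
helpers rather than stated in this patch, the _locked variant is for
callers that already hold dev->struct_mutex. A minimal sketch:

    /* Normal caller: */
    ptr = msm_gem_kernel_new(dev, size, flags, aspace, &bo, &iova);

    /* Caller already holding dev->struct_mutex (e.g. GPU init paths): */
    ptr = msm_gem_kernel_new_locked(dev, size, flags, aspace, &bo, &iova);
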
index 791bca3c6a9cc19410ed2963cdf7977eb43fd014..bf065a54013086b774dfbffb4175a8f407bfa51d 100644 (file)
@@ -33,16 +33,14 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
        }
 
        ring->gpu = gpu;
-       ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
-       if (IS_ERR(ring->bo)) {
-               ret = PTR_ERR(ring->bo);
-               ring->bo = NULL;
-               goto fail;
-       }
 
-       ring->start = msm_gem_get_vaddr(ring->bo);
+       /* Pass NULL for the iova pointer - we will map it later */
+       ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC,
+               gpu->aspace, &ring->bo, NULL);
+
        if (IS_ERR(ring->start)) {
                ret = PTR_ERR(ring->start);
+               ring->start = 0;
                goto fail;
        }
        ring->end   = ring->start + (size / 4);
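
On the NULL iova above: the helper only pins a GPU address when the
iova pointer is non-NULL (the "if (iova)" check in _msm_gem_kernel_new()
earlier in this patch), so the ring can be allocated and vmapped now
and mapped into the GPU address space later. A sketch of that later
mapping, using the same call the helper uses internally:

    uint64_t iova;
    int ret = msm_gem_get_iova(ring->bo, gpu->aspace, &iova);
    if (ret)
            return ret;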