drm/msm: pass address-space to _get_iova() and friends
author: Rob Clark <robdclark@gmail.com>
Tue, 13 Jun 2017 15:07:08 +0000 (11:07 -0400)
committer: Rob Clark <robdclark@gmail.com>
Fri, 16 Jun 2017 15:16:04 +0000 (11:16 -0400)
No functional change, that will come later.  But this will make it
easier to deal with dynamically created address spaces (ie. per-
process pagetables for gpu).

Signed-off-by: Rob Clark <robdclark@gmail.com>
17 files changed:
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_power.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fb.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.c

index 8d17f525c417a728d5339ec6916885062281a990..f6a9eec71fec1bcf7367473970a49e1a073e1fb3 100644 (file)
@@ -308,7 +308,7 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
        }
 
        if (iova) {
-               int ret = msm_gem_get_iova_locked(bo, gpu->id, iova);
+               int ret = msm_gem_get_iova_locked(bo, gpu->aspace, iova);
 
                if (ret) {
                        drm_gem_object_unreference(bo);
@@ -696,19 +696,19 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 
        if (a5xx_gpu->pm4_bo) {
                if (a5xx_gpu->pm4_iova)
-                       msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->id);
+                       msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
                drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
        }
 
        if (a5xx_gpu->pfp_bo) {
                if (a5xx_gpu->pfp_iova)
-                       msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->id);
+                       msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
                drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
        }
 
        if (a5xx_gpu->gpmu_bo) {
                if (a5xx_gpu->gpmu_iova)
-                       msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+                       msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
                drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
        }
 
index f3274b827a491d38a0aa59ec401fa2f56b948502..feb7f4fd42fb7ceb2381f06c0e16bc58b5d78825 100644 (file)
@@ -298,7 +298,8 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
        if (IS_ERR(a5xx_gpu->gpmu_bo))
                goto err;
 
-       if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova))
+       if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->aspace,
+                       &a5xx_gpu->gpmu_iova))
                goto err;
 
        ptr = msm_gem_get_vaddr_locked(a5xx_gpu->gpmu_bo);
@@ -327,7 +328,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 
 err:
        if (a5xx_gpu->gpmu_iova)
-               msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+               msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
        if (a5xx_gpu->gpmu_bo)
                drm_gem_object_unreference(a5xx_gpu->gpmu_bo);
 
index 30a2096ac9a253378251a348e6814b7431b6f0af..6fa694e6ae8c62c0fc7a85ed722423af95ca2d14 100644 (file)
@@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 
        DBG("%s", gpu->name);
 
-       ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+       ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
        if (ret) {
                gpu->rb_iova = 0;
                dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -414,7 +414,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                return -ENOMEM;
        }
 
-       ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
+       ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
                        &adreno_gpu->memptrs_iova);
        if (ret) {
                dev_err(drm->dev, "could not map memptrs: %d\n", ret);
@@ -433,7 +433,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
                        msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
 
                if (adreno_gpu->memptrs_iova)
-                       msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
+                       msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->aspace);
 
                drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
        }
index 3c752cd0cc1cd5764a7c22b7a1d88d74645fef86..2e7077194b21452bc2b59ce86790c3f63ab8f803 100644 (file)
@@ -994,7 +994,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
                }
 
                ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj,
-                               priv->kms->id, &iova);
+                               priv->kms->aspace, &iova);
                mutex_unlock(&dev->struct_mutex);
                if (ret) {
                        pr_err("%s: failed to get iova, %d\n", __func__, ret);
@@ -1152,7 +1152,7 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
 
        if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
                ret = msm_gem_get_iova(msm_host->tx_gem_obj,
-                               priv->kms->id, &dma_base);
+                               priv->kms->aspace, &dma_base);
                if (ret) {
                        pr_err("%s: failed to get iova: %d\n", __func__, ret);
                        return ret;
index d9ee73c3672dee3ac691bb5a30d226c32ee622f1..59153a4ebd185329d924fb821ba7a23761cc1720 100644 (file)
@@ -128,7 +128,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
        struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
        struct msm_kms *kms = &mdp4_kms->base.base;
 
-       msm_gem_put_iova(val, kms->id);
+       msm_gem_put_iova(val, kms->aspace);
        drm_gem_object_unreference_unlocked(val);
 }
 
@@ -374,7 +374,7 @@ static void update_cursor(struct drm_crtc *crtc)
                if (next_bo) {
                        /* take a obj ref + iova ref when we start scanning out: */
                        drm_gem_object_reference(next_bo);
-                       msm_gem_get_iova_locked(next_bo, kms->id, &iova);
+                       msm_gem_get_iova_locked(next_bo, kms->aspace, &iova);
 
                        /* enable cursor: */
                        mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -432,7 +432,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
        }
 
        if (cursor_bo) {
-               ret = msm_gem_get_iova(cursor_bo, kms->id, &iova);
+               ret = msm_gem_get_iova(cursor_bo, kms->aspace, &iova);
                if (ret)
                        goto fail;
        } else {
index 7cf4dd40de280c6459f1581ce70cc090d39ed6c4..0c01f9fe0ef00de4a1d35b3a50194da2f633dc06 100644 (file)
@@ -163,7 +163,7 @@ static void mdp4_destroy(struct msm_kms *kms)
        struct msm_gem_address_space *aspace = kms->aspace;
 
        if (mdp4_kms->blank_cursor_iova)
-               msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->id);
+               msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
        drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
 
        if (aspace) {
@@ -545,7 +545,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
                goto fail;
        }
 
-       ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->id,
+       ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
                        &mdp4_kms->blank_cursor_iova);
        if (ret) {
                dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
index 17fb1d6f2f232a9c340734afac2b09b91c3d7d7b..a20e3d644523572193893a2f26d19c19d2308590 100644 (file)
@@ -110,7 +110,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
                return 0;
 
        DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
-       return msm_framebuffer_prepare(fb, kms->id);
+       return msm_framebuffer_prepare(fb, kms->aspace);
 }
 
 static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
@@ -125,7 +125,7 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
                return;
 
        DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
-       msm_framebuffer_cleanup(fb, kms->id);
+       msm_framebuffer_cleanup(fb, kms->aspace);
 }
 
 
@@ -175,13 +175,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
                        MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
 
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 0));
+                       msm_framebuffer_iova(fb, kms->aspace, 0));
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 1));
+                       msm_framebuffer_iova(fb, kms->aspace, 1));
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 2));
+                       msm_framebuffer_iova(fb, kms->aspace, 2));
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 3));
+                       msm_framebuffer_iova(fb, kms->aspace, 3));
 
        plane->fb = fb;
 }
index d79c5faba35e410cd218693f42e0642cfa388838..cb5415d6c04b7ab6e1e80503d26b32891a934dee 100644 (file)
@@ -162,7 +162,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
        struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
        struct msm_kms *kms = &mdp5_kms->base.base;
 
-       msm_gem_put_iova(val, kms->id);
+       msm_gem_put_iova(val, kms->aspace);
        drm_gem_object_unreference_unlocked(val);
 }
 
@@ -760,7 +760,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
        if (!cursor_bo)
                return -ENOENT;
 
-       ret = msm_gem_get_iova(cursor_bo, kms->id, &cursor_addr);
+       ret = msm_gem_get_iova(cursor_bo, kms->aspace, &cursor_addr);
        if (ret)
                return -EINVAL;
 
index b6a66befd1b7252f372b928d0f454816d36d2dc5..fe3a4de1a4331ff86f0b4f0cc85a48b208bca3b5 100644 (file)
@@ -279,7 +279,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
                return 0;
 
        DBG("%s: prepare: FB[%u]", plane->name, fb->base.id);
-       return msm_framebuffer_prepare(fb, kms->id);
+       return msm_framebuffer_prepare(fb, kms->aspace);
 }
 
 static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
@@ -293,7 +293,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
                return;
 
        DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
-       msm_framebuffer_cleanup(fb, kms->id);
+       msm_framebuffer_cleanup(fb, kms->aspace);
 }
 
 #define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
@@ -511,13 +511,13 @@ static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
                        MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
 
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 0));
+                       msm_framebuffer_iova(fb, kms->aspace, 0));
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 1));
+                       msm_framebuffer_iova(fb, kms->aspace, 1));
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 2));
+                       msm_framebuffer_iova(fb, kms->aspace, 2));
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 3));
+                       msm_framebuffer_iova(fb, kms->aspace, 3));
 }
 
 /* Note: mdp5_plane->pipe_lock must be locked */
index beb4f6b3ac70e20f0bceadc692b2640c9b446aa6..a9c3c6b813d35c8554e4297d76a1aca1a859490a 100644 (file)
@@ -51,6 +51,7 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
        .atomic_state_free = msm_atomic_state_free,
 };
 
+#include "msm_gem.h"  /* temporary */
 int msm_register_address_space(struct drm_device *dev,
                struct msm_gem_address_space *aspace)
 {
@@ -61,7 +62,9 @@ int msm_register_address_space(struct drm_device *dev,
 
        priv->aspace[priv->num_aspaces] = aspace;
 
-       return priv->num_aspaces++;
+       aspace->id = priv->num_aspaces++;
+
+       return aspace->id;
 }
 
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
@@ -707,7 +710,7 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
        if (!priv->gpu)
                return -EINVAL;
 
-       return msm_gem_get_iova(obj, priv->gpu->id, iova);
+       return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
 }
 
 static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
index 1b26ca626528ab5f4435f689d0a213f0e672aaa5..5570c5c91340fc15a9d1da990aa17d40474ce141 100644 (file)
@@ -209,13 +209,16 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-               uint64_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova);
-uint64_t msm_gem_iova(struct drm_gem_object *obj, int id);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova);
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+void msm_gem_put_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args);
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -251,9 +254,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                struct dma_buf *dmabuf, struct sg_table *sgt);
 
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace, int plane);
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
 struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
index ba2733a95a4f03c60961c68cb096950d907294c6..6ecb7b17031631ceb34eca0a5b7a22ee23d7f0a4 100644 (file)
@@ -84,14 +84,15 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
  * should be fine, since only the scanout (mdpN) side of things needs
  * this, the gpu doesn't care about fb's.
  */
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace)
 {
        struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
        int ret, i, n = fb->format->num_planes;
        uint64_t iova;
 
        for (i = 0; i < n; i++) {
-               ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
+               ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
                DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
                if (ret)
                        return ret;
@@ -100,21 +101,23 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
        return 0;
 }
 
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace)
 {
        struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
        int i, n = fb->format->num_planes;
 
        for (i = 0; i < n; i++)
-               msm_gem_put_iova(msm_fb->planes[i], id);
+               msm_gem_put_iova(msm_fb->planes[i], aspace);
 }
 
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace, int plane)
 {
        struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
        if (!msm_fb->planes[plane])
                return 0;
-       return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
+       return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
 }
 
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
index 3c08d6d359448d947d7e98c5126a4ca9b9ce727f..803ed272dc6da12baf71fad82441258393f10506 100644 (file)
@@ -126,7 +126,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
         * in panic (ie. lock-safe, etc) we could avoid pinning the
         * buffer now:
         */
-       ret = msm_gem_get_iova_locked(fbdev->bo, priv->kms->id, &paddr);
+       ret = msm_gem_get_iova_locked(fbdev->bo, priv->kms->aspace, &paddr);
        if (ret) {
                dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
                goto fail_unlock;
index 38fbaadccfb7ee1acd90f4a1fc5158b64b684b82..0a38c5b1a799b730c9961a500097cdb8c4dbff5d 100644 (file)
@@ -308,10 +308,11 @@ put_iova(struct drm_gem_object *obj)
  * That means when I do eventually need to add support for unpinning
  * the refcnt counter needs to be atomic_t.
  */
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-               uint64_t *iova)
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       int id = aspace ? aspace->id : 0;
        int ret = 0;
 
        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -338,9 +339,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 }
 
 /* get iova, taking a reference.  Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       int id = aspace ? aspace->id : 0;
        int ret;
 
        /* this is safe right now because we don't unmap until the
@@ -353,7 +356,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
        }
 
        mutex_lock(&obj->dev->struct_mutex);
-       ret = msm_gem_get_iova_locked(obj, id, iova);
+       ret = msm_gem_get_iova_locked(obj, aspace, iova);
        mutex_unlock(&obj->dev->struct_mutex);
        return ret;
 }
@@ -361,14 +364,17 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
 /* get iova without taking a reference, used in places where you have
  * already done a 'msm_gem_get_iova()'.
  */
-uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       int id = aspace ? aspace->id : 0;
        WARN_ON(!msm_obj->domain[id].iova);
        return msm_obj->domain[id].iova;
 }
 
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace)
 {
        // XXX TODO ..
        // NOTE: probably don't need a _locked() version.. we wouldn't
index 1b4cf20043ea3367c02a7b711534fbadaa78409f..4b4b352b57183cf6a608a6b9c010068b280a74f7 100644 (file)
@@ -33,6 +33,7 @@ struct msm_gem_address_space {
        struct drm_mm mm;
        struct msm_mmu *mmu;
        struct kref kref;
+       int id;    /* temporary */
 };
 
 struct msm_gem_vma {
index 7832e6421d250d0bd78400057e46dce07dc2d18c..c8d01df993da098ae09d149c368d30f05fceb018 100644 (file)
@@ -158,7 +158,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
        struct msm_gem_object *msm_obj = submit->bos[i].obj;
 
        if (submit->bos[i].flags & BO_PINNED)
-               msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+               msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
 
        if (submit->bos[i].flags & BO_LOCKED)
                ww_mutex_unlock(&msm_obj->resv->lock);
@@ -246,7 +246,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
 
                /* if locking succeeded, pin bo: */
                ret = msm_gem_get_iova_locked(&msm_obj->base,
-                               submit->gpu->id, &iova);
+                               submit->gpu->aspace, &iova);
 
                if (ret)
                        break;
index ebbaed442e8ad72145d646cfc598cb21d721c2fa..36f0f1e5fc81bd3b175abc76265ef1af2563390b 100644 (file)
@@ -416,7 +416,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
                /* move to inactive: */
                msm_gem_move_to_inactive(&msm_obj->base);
-               msm_gem_put_iova(&msm_obj->base, gpu->id);
+               msm_gem_put_iova(&msm_obj->base, gpu->aspace);
                drm_gem_object_unreference(&msm_obj->base);
        }
 
@@ -498,7 +498,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                /* submit takes a reference to the bo and iova until retired: */
                drm_gem_object_reference(&msm_obj->base);
                msm_gem_get_iova_locked(&msm_obj->base,
-                               submit->gpu->id, &iova);
+                               submit->gpu->aspace, &iova);
 
                if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
                        msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
@@ -694,7 +694,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
 
        if (gpu->rb) {
                if (gpu->rb_iova)
-                       msm_gem_put_iova(gpu->rb->bo, gpu->id);
+                       msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
                msm_ringbuffer_destroy(gpu->rb);
        }