drm/amdgpu: separate bo_va structure
author Christian König <christian.koenig@amd.com>
Tue, 1 Aug 2017 08:51:43 +0000 (10:51 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
Thu, 17 Aug 2017 19:46:07 +0000 (15:46 -0400)
Split the bo_va structure into a common vm_bo_base and the bo_va specific fields to allow other uses of the base structure as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

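For illustration only (editor's sketch, not part of the patch): with the common fields pulled out into struct amdgpu_vm_bo_base, any per-VM tracking structure can embed the base, and code that only holds a base pointer (e.g. when walking bo->va, as in the amdgpu_vm_bo_invalidate() hunk below) can get back to the containing amdgpu_bo_va with container_of(). The helper name here is made up for the example; container_of() comes from <linux/kernel.h> and the structure definitions from the amdgpu_object.h/amdgpu_vm.h hunks below.

/* Hypothetical helper, for illustration only: resolve an embedded
 * amdgpu_vm_bo_base back to the amdgpu_bo_va that contains it.
 */
static struct amdgpu_bo_va *
amdgpu_vm_bo_va_from_base(struct amdgpu_vm_bo_base *bo_base)
{
	return container_of(bo_base, struct amdgpu_bo_va, base);
}

Any other structure embedding the same base (the "other uses" mentioned above) would be resolved the same way, since the generic VM code only ever touches the base fields.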
index 3c64248673ee0f099539c2eedacb8530aa166635..75e7141c8de46cac473d0fb9757f81ee4353087f 100644
@@ -1487,7 +1487,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                            addr > mapping->last)
                                continue;
 
-                       *bo = lobj->bo_va->bo;
+                       *bo = lobj->bo_va->base.bo;
                        return mapping;
                }
 
@@ -1496,7 +1496,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                            addr > mapping->last)
                                continue;
 
-                       *bo = lobj->bo_va->bo;
+                       *bo = lobj->bo_va->base.bo;
                        return mapping;
                }
        }
index 5ae9941bad7c6fcdc670b682ebf675df4ae1c2f4..7171968f261e1a13094f3c94b2d649a382b2f6a0 100644
@@ -621,7 +621,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
-               r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
+               r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
                                        args->map_size);
                if (r)
                        goto error_backoff;
@@ -641,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                                                args->map_size);
                break;
        case AMDGPU_VA_OP_REPLACE:
-               r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
+               r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
                                        args->map_size);
                if (r)
                        goto error_backoff;
index 9b7b4fcb047bb9cc413d1a5e9934d54f0638de10..a288fa6d72c8026f60a62625f4bf421cc6d3a74e 100644
@@ -33,6 +33,7 @@
 
 #define AMDGPU_BO_INVALID_OFFSET       LONG_MAX
 
+/* bo virtual addresses in a vm */
 struct amdgpu_bo_va_mapping {
        struct list_head                list;
        struct rb_node                  rb;
@@ -43,26 +44,19 @@ struct amdgpu_bo_va_mapping {
        uint64_t                        flags;
 };
 
-/* bo virtual addresses in a specific vm */
+/* User space allocated BO in a VM */
 struct amdgpu_bo_va {
+       struct amdgpu_vm_bo_base        base;
+
        /* protected by bo being reserved */
-       struct list_head                bo_list;
        struct dma_fence                *last_pt_update;
        unsigned                        ref_count;
 
-       /* protected by vm mutex and spinlock */
-       struct list_head                vm_status;
-
        /* mappings for this bo_va */
        struct list_head                invalids;
        struct list_head                valids;
-
-       /* constant after initialization */
-       struct amdgpu_vm                *vm;
-       struct amdgpu_bo                *bo;
 };
 
-
 struct amdgpu_bo {
        /* Protected by tbo.reserved */
        u32                             preferred_domains;
index d8cd3e554488103ecbc6599429ac0d2a99a20219..1c88bd5e29adc161719a93bc07de4485c4fecce3 100644
@@ -284,7 +284,7 @@ TRACE_EVENT(amdgpu_vm_bo_map,
                             ),
 
            TP_fast_assign(
-                          __entry->bo = bo_va ? bo_va->bo : NULL;
+                          __entry->bo = bo_va ? bo_va->base.bo : NULL;
                           __entry->start = mapping->start;
                           __entry->last = mapping->last;
                           __entry->offset = mapping->offset;
@@ -308,7 +308,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
                             ),
 
            TP_fast_assign(
-                          __entry->bo = bo_va->bo;
+                          __entry->bo = bo_va->base.bo;
                           __entry->start = mapping->start;
                           __entry->last = mapping->last;
                           __entry->offset = mapping->offset;
index 89208456d36010b89cebdb58d1a4a567b5881023..ab05121b9272b9f11bf7ab52aa0ca085149aeeca 100644
@@ -76,7 +76,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                return -ENOMEM;
        }
 
-       r = amdgpu_vm_alloc_pts(adev, (*bo_va)->vm, AMDGPU_CSA_VADDR,
+       r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
                                AMDGPU_CSA_SIZE);
        if (r) {
                DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
index 14012e80fa27bbce11ff08007af55704efe221b4..f24554f2d0e5b3f9100e36c3bd70cd306700fdcb 100644
@@ -870,8 +870,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 {
        struct amdgpu_bo_va *bo_va;
 
-       list_for_each_entry(bo_va, &bo->va, bo_list) {
-               if (bo_va->vm == vm) {
+       list_for_each_entry(bo_va, &bo->va, base.bo_list) {
+               if (bo_va->base.vm == vm) {
                        return bo_va;
                }
        }
@@ -1726,7 +1726,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
                        bool clear)
 {
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_bo *bo = bo_va->base.bo;
+       struct amdgpu_vm *vm = bo_va->base.vm;
        struct amdgpu_bo_va_mapping *mapping;
        dma_addr_t *pages_addr = NULL;
        uint64_t gtt_flags, flags;
@@ -1735,27 +1736,27 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        struct dma_fence *exclusive;
        int r;
 
-       if (clear || !bo_va->bo) {
+       if (clear || !bo_va->base.bo) {
                mem = NULL;
                nodes = NULL;
                exclusive = NULL;
        } else {
                struct ttm_dma_tt *ttm;
 
-               mem = &bo_va->bo->tbo.mem;
+               mem = &bo_va->base.bo->tbo.mem;
                nodes = mem->mm_node;
                if (mem->mem_type == TTM_PL_TT) {
-                       ttm = container_of(bo_va->bo->tbo.ttm, struct
-                                          ttm_dma_tt, ttm);
+                       ttm = container_of(bo_va->base.bo->tbo.ttm,
+                                          struct ttm_dma_tt, ttm);
                        pages_addr = ttm->dma_address;
                }
-               exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
+               exclusive = reservation_object_get_excl(bo->tbo.resv);
        }
 
-       if (bo_va->bo) {
-               flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-               gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
-                       adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
+       if (bo) {
+               flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+               gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) &&
+                       adev == amdgpu_ttm_adev(bo->tbo.bdev)) ?
                        flags : 0;
        } else {
                flags = 0x0;
@@ -1763,7 +1764,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        }
 
        spin_lock(&vm->status_lock);
-       if (!list_empty(&bo_va->vm_status))
+       if (!list_empty(&bo_va->base.vm_status))
                list_splice_init(&bo_va->valids, &bo_va->invalids);
        spin_unlock(&vm->status_lock);
 
@@ -1786,9 +1787,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
        spin_lock(&vm->status_lock);
        list_splice_init(&bo_va->invalids, &bo_va->valids);
-       list_del_init(&bo_va->vm_status);
+       list_del_init(&bo_va->base.vm_status);
        if (clear)
-               list_add(&bo_va->vm_status, &vm->cleared);
+               list_add(&bo_va->base.vm_status, &vm->cleared);
        spin_unlock(&vm->status_lock);
 
        if (vm->use_cpu_for_update) {
@@ -2001,7 +2002,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
        spin_lock(&vm->status_lock);
        while (!list_empty(&vm->invalidated)) {
                bo_va = list_first_entry(&vm->invalidated,
-                       struct amdgpu_bo_va, vm_status);
+                       struct amdgpu_bo_va, base.vm_status);
                spin_unlock(&vm->status_lock);
 
                r = amdgpu_vm_bo_update(adev, bo_va, true);
@@ -2041,16 +2042,17 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
        if (bo_va == NULL) {
                return NULL;
        }
-       bo_va->vm = vm;
-       bo_va->bo = bo;
+       bo_va->base.vm = vm;
+       bo_va->base.bo = bo;
+       INIT_LIST_HEAD(&bo_va->base.bo_list);
+       INIT_LIST_HEAD(&bo_va->base.vm_status);
+
        bo_va->ref_count = 1;
-       INIT_LIST_HEAD(&bo_va->bo_list);
        INIT_LIST_HEAD(&bo_va->valids);
        INIT_LIST_HEAD(&bo_va->invalids);
-       INIT_LIST_HEAD(&bo_va->vm_status);
 
        if (bo)
-               list_add_tail(&bo_va->bo_list, &bo->va);
+               list_add_tail(&bo_va->base.bo_list, &bo->va);
 
        return bo_va;
 }
@@ -2075,7 +2077,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                     uint64_t size, uint64_t flags)
 {
        struct amdgpu_bo_va_mapping *mapping, *tmp;
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_bo *bo = bo_va->base.bo;
+       struct amdgpu_vm *vm = bo_va->base.vm;
        uint64_t eaddr;
 
        /* validate the parameters */
@@ -2086,7 +2089,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        /* make sure object fit at this offset */
        eaddr = saddr + size - 1;
        if (saddr >= eaddr ||
-           (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+           (bo && offset + size > amdgpu_bo_size(bo)))
                return -EINVAL;
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2096,7 +2099,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        if (tmp) {
                /* bo and tmp overlap, invalid addr */
                dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
-                       "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+                       "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
                        tmp->start, tmp->last + 1);
                return -EINVAL;
        }
@@ -2141,7 +2144,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
                             uint64_t size, uint64_t flags)
 {
        struct amdgpu_bo_va_mapping *mapping;
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_bo *bo = bo_va->base.bo;
+       struct amdgpu_vm *vm = bo_va->base.vm;
        uint64_t eaddr;
        int r;
 
@@ -2153,7 +2157,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        /* make sure object fit at this offset */
        eaddr = saddr + size - 1;
        if (saddr >= eaddr ||
-           (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+           (bo && offset + size > amdgpu_bo_size(bo)))
                return -EINVAL;
 
        /* Allocate all the needed memory */
@@ -2161,7 +2165,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        if (!mapping)
                return -ENOMEM;
 
-       r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
+       r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
        if (r) {
                kfree(mapping);
                return r;
@@ -2201,7 +2205,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                       uint64_t saddr)
 {
        struct amdgpu_bo_va_mapping *mapping;
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_vm *vm = bo_va->base.vm;
        bool valid = true;
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2349,12 +2353,12 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va)
 {
        struct amdgpu_bo_va_mapping *mapping, *next;
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_vm *vm = bo_va->base.vm;
 
-       list_del(&bo_va->bo_list);
+       list_del(&bo_va->base.bo_list);
 
        spin_lock(&vm->status_lock);
-       list_del(&bo_va->vm_status);
+       list_del(&bo_va->base.vm_status);
        spin_unlock(&vm->status_lock);
 
        list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
@@ -2386,13 +2390,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                             struct amdgpu_bo *bo)
 {
-       struct amdgpu_bo_va *bo_va;
-
-       list_for_each_entry(bo_va, &bo->va, bo_list) {
-               spin_lock(&bo_va->vm->status_lock);
-               if (list_empty(&bo_va->vm_status))
-                       list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-               spin_unlock(&bo_va->vm->status_lock);
+       struct amdgpu_vm_bo_base *bo_base;
+
+       list_for_each_entry(bo_base, &bo->va, bo_list) {
+               spin_lock(&bo_base->vm->status_lock);
+               if (list_empty(&bo_base->vm_status))
+                       list_add(&bo_base->vm_status,
+                                &bo_base->vm->invalidated);
+               spin_unlock(&bo_base->vm->status_lock);
        }
 }
 
index f12c12fec3c03007d5f5dfcd2a4f82d7b18383aa..95e5e81e1026c93999810ef94347e86c7aed78f4 100644
@@ -99,6 +99,18 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
 #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
 
+/* base structure for tracking BO usage in a VM */
+struct amdgpu_vm_bo_base {
+       /* constant after initialization */
+       struct amdgpu_vm                *vm;
+       struct amdgpu_bo                *bo;
+
+       /* protected by bo being reserved */
+       struct list_head                bo_list;
+
+       /* protected by spinlock */
+       struct list_head                vm_status;
+};
 
 struct amdgpu_vm_pt {
        struct amdgpu_bo        *bo;