drm/amdgpu:add new file for SRIOV
author: Monk Liu <Monk.Liu@amd.com>
Mon, 9 Jan 2017 07:21:13 +0000 (15:21 +0800)
committer: Alex Deucher <alexander.deucher@amd.com>
Fri, 27 Jan 2017 16:13:19 +0000 (11:13 -0500)
For SRIOV usage, the CSA is allocated only once per device and each
VM will map to it.

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h

index dba097ccdd9b227dcdc987b355b27adad162d438..d119986ff2eacf325199a14a2f342df0a74de307 100644 (file)
@@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
        atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
        amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
        amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
-       amdgpu_gtt_mgr.o amdgpu_vram_mgr.o
+       amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
new file mode 100644 (file)
index 0000000..cfc4721
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+
+/**
+ * amdgpu_allocate_static_csa - allocate the per-device static CSA buffer
+ * @adev: amdgpu device pointer
+ *
+ * Creates a page-aligned kernel BO of AMDGPU_CSA_SIZE bytes in VRAM,
+ * storing the BO handle in adev->virt.csa_obj and its GPU address in
+ * adev->virt.csa_vmid0_addr, then zero-fills the buffer through the
+ * returned CPU mapping.
+ *
+ * Returns 0 on success or the negative error code from
+ * amdgpu_bo_create_kernel() on failure.
+ */
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
+{
+       int r;
+       void *ptr;
+
+       r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
+                               AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
+                               &adev->virt.csa_vmid0_addr, &ptr);
+       if (r)
+               return r;
+
+       /* Clear the whole CSA so the GPU never sees stale contents. */
+       memset(ptr, 0, AMDGPU_CSA_SIZE);
+       return 0;
+}
+
+/*
+ * amdgpu_map_static_csa - map the static CSA BO into one VM
+ *
+ * Should be called during amdgpu_vm_init. It maps the virtual address
+ * "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE" (AMDGPU_CSA_VADDR) into
+ * this VM, and each GFX command submission should use this virtual
+ * address within the META_DATA init package to support SRIOV gfx
+ * preemption.
+ *
+ * Returns 0 on success, -ENOMEM if the bo_va cannot be created, or a
+ * negative error code from reservation/mapping.
+ */
+
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+       int r;
+       struct amdgpu_bo_va *bo_va;
+       struct ww_acquire_ctx ticket;
+       struct list_head list;
+       struct amdgpu_bo_list_entry pd;
+       struct ttm_validate_buffer csa_tv;
+
+       /* Build a validation list holding the CSA BO plus the VM's page
+        * directory so both can be reserved atomically below.
+        */
+       INIT_LIST_HEAD(&list);
+       INIT_LIST_HEAD(&csa_tv.head);
+       csa_tv.bo = &adev->virt.csa_obj->tbo;
+       csa_tv.shared = true;
+
+       list_add(&csa_tv.head, &list);
+       amdgpu_vm_get_pd_bo(vm, &list, &pd);
+
+       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+       if (r) {
+               DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+               return r;
+       }
+
+       bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
+       if (!bo_va) {
+               ttm_eu_backoff_reservation(&ticket, &list);
+               DRM_ERROR("failed to create bo_va for static CSA\n");
+               return -ENOMEM;
+       }
+
+       /* Map the CSA read/write/execute at the fixed AMDGPU_CSA_VADDR.
+        * NOTE(review): missing space after "0," below — style nit only.
+        */
+       r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,AMDGPU_CSA_SIZE,
+                                               AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+                                               AMDGPU_PTE_EXECUTABLE);
+
+       if (r) {
+               DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+               /* NOTE(review): if amdgpu_vm_bo_rmv() already frees bo_va
+                * internally, the kfree() below is a double free — verify
+                * against amdgpu_vm.c before relying on this error path.
+                */
+               amdgpu_vm_bo_rmv(adev, bo_va);
+               ttm_eu_backoff_reservation(&ticket, &list);
+               kfree(bo_va);
+               return r;
+       }
+
+       /* Remember the mapping so command submission can reference it. */
+       vm->csa_bo_va = bo_va;
+       ttm_eu_backoff_reservation(&ticket, &list);
+       return 0;
+}
index 63609e1796510e15ef58678ffce85d98aec1e436..fa1d569ec479fbbdf324fa75f80066dd5f304249 100644 (file)
@@ -60,4 +60,8 @@ static inline bool is_virtual_machine(void)
 #endif
 }
 
+struct amdgpu_vm;
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+
 #endif