drm/amdgpu: add slab cache for sync objects as well
author Christian König <christian.koenig@amd.com>
Tue, 16 Feb 2016 10:24:58 +0000 (11:24 +0100)
committer Alex Deucher <alexander.deucher@amd.com>
Tue, 8 Mar 2016 16:01:47 +0000 (11:01 -0500)
We allocate and free these entries all the time, so back them with a
dedicated slab cache instead of plain kmalloc()/kfree().
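
For reference, this is the kmem_cache lifecycle the patch wires up (a
minimal sketch reusing the names from the diff below; surrounding error
paths trimmed):

        /* one fixed-size cache, created once at module load */
        amdgpu_sync_slab = kmem_cache_create("amdgpu_sync",
                                             sizeof(struct amdgpu_sync_entry),
                                             0, SLAB_HWCACHE_ALIGN, NULL);

        /* hot path: entries now come from the cache ... */
        e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        /* ... and go back to it instead of kfree() */
        kmem_cache_free(amdgpu_sync_slab, e);

        /* destroyed once at module unload */
        kmem_cache_destroy(amdgpu_sync_slab);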

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index fab6ddb26b5b7d260a8d17b59a74fe9663e9eb41..3e4ec56919c79a6e0056dee2c93b6222f1ca35e7 100644
@@ -634,6 +634,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
 
 /*
  * GART structures, functions & helpers
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index ce79a8b605a0664a26ac2b42c56f06538fcaf9b8..875333bb4e45d85914b026c9cf914b1fcc32b168 100644
@@ -539,6 +539,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
 
 static int __init amdgpu_init(void)
 {
+       amdgpu_sync_init();
 #ifdef CONFIG_VGA_CONSOLE
        if (vgacon_text_force()) {
                DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
@@ -563,6 +564,7 @@ static void __exit amdgpu_exit(void)
        amdgpu_amdkfd_fini();
        drm_pci_exit(driver, pdriver);
        amdgpu_unregister_atpx_handler();
+       amdgpu_sync_fini();
 }
 
 module_init(amdgpu_init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index e3673422aac8dce628e36cb073eb03ae87e0c216..c48b4fce5e57af9196eb2887f9969d49dec1a7ed 100644
@@ -37,6 +37,8 @@ struct amdgpu_sync_entry {
        struct fence            *fence;
 };
 
+static struct kmem_cache *amdgpu_sync_slab;
+
 /**
  * amdgpu_sync_create - zero init sync object
  *
@@ -133,7 +135,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                return 0;
        }
 
-       e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
+       e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
        if (!e)
                return -ENOMEM;
 
@@ -214,7 +216,7 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
                f = e->fence;
 
                hash_del(&e->node);
-               kfree(e);
+               kmem_cache_free(amdgpu_sync_slab, e);
 
                if (!fence_is_signaled(f))
                        return f;
@@ -237,7 +239,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
 
                hash_del(&e->node);
                fence_put(e->fence);
-               kfree(e);
+               kmem_cache_free(amdgpu_sync_slab, e);
        }
 
        return 0;
@@ -259,8 +261,34 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                hash_del(&e->node);
                fence_put(e->fence);
-               kfree(e);
+               kmem_cache_free(amdgpu_sync_slab, e);
        }
 
        fence_put(sync->last_vm_update);
 }
+
+/**
+ * amdgpu_sync_init - init sync object subsystem
+ *
+ * Allocate the slab allocator.
+ */
+int amdgpu_sync_init(void)
+{
+       amdgpu_sync_slab = kmem_cache_create(
+               "amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
+               SLAB_HWCACHE_ALIGN, NULL);
+       if (!amdgpu_sync_slab)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/**
+ * amdgpu_sync_fini - fini sync object subsystem
+ *
+ * Free the slab allocator.
+ */
+void amdgpu_sync_fini(void)
+{
+       kmem_cache_destroy(amdgpu_sync_slab);
+}
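
A note on the init path (an editor's sketch, not part of the patch):
amdgpu_sync_init() can fail with -ENOMEM, but the call added to
amdgpu_init() above discards the return value. A stricter variant would
propagate the error before any other setup runs:

        int r = amdgpu_sync_init();

        if (r)
                return r;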