drm/amdgpu: switch ih handling to two levels (v3)
author: Alex Deucher <alexander.deucher@amd.com>
Tue, 29 Mar 2016 22:28:50 +0000 (18:28 -0400)
committer: Alex Deucher <alexander.deucher@amd.com>
Thu, 30 Mar 2017 03:53:37 +0000 (23:53 -0400)
Newer ASICs have two levels of irq ids now:
client id - the IP
src id - the interrupt src within the IP

v2: integrated Christian's comments.
v3: fix rebase fail in SI and CIK

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Ken Wang <Qingqing.Wang@amd.com>
Reviewed-by: Ken Wang <Qingqing.Wang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
34 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/cik_ih.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/cz_ih.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/iceland_ih.c
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/si_dma.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/si_ih.c
drivers/gpu/drm/amd/amdgpu/tonga_ih.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/include/cgs_linux.h

index d9e5aa4a79efae12f541d93aab8d7098b06aa8d9..f0e36240365ae4341b0b3b5487a54167565aa91b 100644 (file)
@@ -571,7 +571,9 @@ static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
        .process = cgs_process_irq,
 };
 
-static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id,
+static int amdgpu_cgs_add_irq_source(void *cgs_device,
+                                    unsigned client_id,
+                                    unsigned src_id,
                                     unsigned num_types,
                                     cgs_irq_source_set_func_t set,
                                     cgs_irq_handler_func_t handler,
@@ -597,7 +599,7 @@ static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src
        irq_params->handler = handler;
        irq_params->private_data = private_data;
        source->data = (void *)irq_params;
-       ret = amdgpu_irq_add_id(adev, src_id, source);
+       ret = amdgpu_irq_add_id(adev, client_id, src_id, source);
        if (ret) {
                kfree(irq_params);
                kfree(source);
@@ -606,16 +608,26 @@ static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src
        return ret;
 }
 
-static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
+static int amdgpu_cgs_irq_get(void *cgs_device, unsigned client_id,
+                             unsigned src_id, unsigned type)
 {
        CGS_FUNC_ADEV;
-       return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
+
+       if (!adev->irq.client[client_id].sources)
+               return -EINVAL;
+
+       return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type);
 }
 
-static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
+static int amdgpu_cgs_irq_put(void *cgs_device, unsigned client_id,
+                             unsigned src_id, unsigned type)
 {
        CGS_FUNC_ADEV;
-       return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
+
+       if (!adev->irq.client[client_id].sources)
+               return -EINVAL;
+
+       return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type);
 }
 
 static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
index ba38ae6a14637fecc9dbd371e1a9fb3c3796967a..d77c63940a3c779c7297676f531fabff4a6ee652 100644 (file)
 
 struct amdgpu_device;
 
+#define AMDGPU_IH_CLIENTID_LEGACY 0
+
+#define AMDGPU_IH_CLIENTID_MAX 0x1f
+
 /*
  * R6xx+ IH ring
  */
@@ -47,10 +51,12 @@ struct amdgpu_ih_ring {
 };
 
 struct amdgpu_iv_entry {
+       unsigned client_id;
        unsigned src_id;
        unsigned src_data;
        unsigned ring_id;
        unsigned vm_id;
+       unsigned vm_id_src;
        unsigned pas_id;
        const uint32_t *iv_entry;
 };
index e63ece049b05da62f2233c9630f5559d3c8caeb5..7e7acd47ec783c8841ddc5136c6ac50a4e704490 100644 (file)
@@ -89,23 +89,28 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
 static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
 {
        unsigned long irqflags;
-       unsigned i, j;
+       unsigned i, j, k;
        int r;
 
        spin_lock_irqsave(&adev->irq.lock, irqflags);
-       for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
-               struct amdgpu_irq_src *src = adev->irq.sources[i];
-
-               if (!src || !src->funcs->set || !src->num_types)
+       for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+               if (!adev->irq.client[i].sources)
                        continue;
 
-               for (j = 0; j < src->num_types; ++j) {
-                       atomic_set(&src->enabled_types[j], 0);
-                       r = src->funcs->set(adev, src, j,
-                                           AMDGPU_IRQ_STATE_DISABLE);
-                       if (r)
-                               DRM_ERROR("error disabling interrupt (%d)\n",
-                                         r);
+               for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+                       struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+
+                       if (!src || !src->funcs->set || !src->num_types)
+                               continue;
+
+                       for (k = 0; k < src->num_types; ++k) {
+                               atomic_set(&src->enabled_types[k], 0);
+                               r = src->funcs->set(adev, src, k,
+                                                   AMDGPU_IRQ_STATE_DISABLE);
+                               if (r)
+                                       DRM_ERROR("error disabling interrupt (%d)\n",
+                                                 r);
+                       }
                }
        }
        spin_unlock_irqrestore(&adev->irq.lock, irqflags);
@@ -254,7 +259,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
  */
 void amdgpu_irq_fini(struct amdgpu_device *adev)
 {
-       unsigned i;
+       unsigned i, j;
 
        drm_vblank_cleanup(adev->ddev);
        if (adev->irq.installed) {
@@ -266,19 +271,25 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
                cancel_work_sync(&adev->reset_work);
        }
 
-       for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
-               struct amdgpu_irq_src *src = adev->irq.sources[i];
-
-               if (!src)
+       for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+               if (!adev->irq.client[i].sources)
                        continue;
 
-               kfree(src->enabled_types);
-               src->enabled_types = NULL;
-               if (src->data) {
-                       kfree(src->data);
-                       kfree(src);
-                       adev->irq.sources[i] = NULL;
+               for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+                       struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+
+                       if (!src)
+                               continue;
+
+                       kfree(src->enabled_types);
+                       src->enabled_types = NULL;
+                       if (src->data) {
+                               kfree(src->data);
+                               kfree(src);
+                               adev->irq.client[i].sources[j] = NULL;
+                       }
                }
+               kfree(adev->irq.client[i].sources);
        }
 }
 
@@ -290,18 +301,30 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
  * @source: irq source
  *
  */
-int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
+int amdgpu_irq_add_id(struct amdgpu_device *adev,
+                     unsigned client_id, unsigned src_id,
                      struct amdgpu_irq_src *source)
 {
-       if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
+       if (client_id >= AMDGPU_IH_CLIENTID_MAX)
                return -EINVAL;
 
-       if (adev->irq.sources[src_id] != NULL)
+       if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
                return -EINVAL;
 
        if (!source->funcs)
                return -EINVAL;
 
+       if (!adev->irq.client[client_id].sources) {
+               adev->irq.client[client_id].sources = kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
+                                                             sizeof(struct amdgpu_irq_src *),
+                                                             GFP_KERNEL);
+               if (!adev->irq.client[client_id].sources)
+                       return -ENOMEM;
+       }
+
+       if (adev->irq.client[client_id].sources[src_id] != NULL)
+               return -EINVAL;
+
        if (source->num_types && !source->enabled_types) {
                atomic_t *types;
 
@@ -313,8 +336,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
                source->enabled_types = types;
        }
 
-       adev->irq.sources[src_id] = source;
-
+       adev->irq.client[client_id].sources[src_id] = source;
        return 0;
 }
 
@@ -329,10 +351,16 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
                         struct amdgpu_iv_entry *entry)
 {
+       unsigned client_id = entry->client_id;
        unsigned src_id = entry->src_id;
        struct amdgpu_irq_src *src;
        int r;
 
+       if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
+               DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
+               return;
+       }
+
        if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
                DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
                return;
@@ -341,7 +369,13 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
        if (adev->irq.virq[src_id]) {
                generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
        } else {
-               src = adev->irq.sources[src_id];
+               if (!adev->irq.client[client_id].sources) {
+                       DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
+                                 client_id, src_id);
+                       return;
+               }
+
+               src = adev->irq.client[client_id].sources[src_id];
                if (!src) {
                        DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
                        return;
@@ -385,13 +419,20 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
 
 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
 {
-       int i, j;
-       for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) {
-               struct amdgpu_irq_src *src = adev->irq.sources[i];
-               if (!src)
+       int i, j, k;
+
+       for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+               if (!adev->irq.client[i].sources)
                        continue;
-               for (j = 0; j < src->num_types; j++)
-                       amdgpu_irq_update(adev, src, j);
+
+               for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+                       struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+
+                       if (!src)
+                               continue;
+                       for (k = 0; k < src->num_types; k++)
+                               amdgpu_irq_update(adev, src, k);
+               }
        }
 }
 
index 1642f410829704fb0cf8fabb7c8e787122bfaf8c..0610cc4a97881619cd57661a5c3bfcd601c192de 100644 (file)
@@ -28,6 +28,7 @@
 #include "amdgpu_ih.h"
 
 #define AMDGPU_MAX_IRQ_SRC_ID  0x100
+#define AMDGPU_MAX_IRQ_CLIENT_ID       0x100
 
 struct amdgpu_device;
 struct amdgpu_iv_entry;
@@ -44,6 +45,10 @@ struct amdgpu_irq_src {
        void *data;
 };
 
+struct amdgpu_irq_client {
+       struct amdgpu_irq_src **sources;
+};
+
 /* provided by interrupt generating IP blocks */
 struct amdgpu_irq_src_funcs {
        int (*set)(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
@@ -58,7 +63,7 @@ struct amdgpu_irq {
        bool                            installed;
        spinlock_t                      lock;
        /* interrupt sources */
-       struct amdgpu_irq_src           *sources[AMDGPU_MAX_IRQ_SRC_ID];
+       struct amdgpu_irq_client        client[AMDGPU_IH_CLIENTID_MAX];
 
        /* status, etc. */
        bool                            msi_enabled; /* msi enabled */
@@ -80,7 +85,8 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg);
 
 int amdgpu_irq_init(struct amdgpu_device *adev);
 void amdgpu_irq_fini(struct amdgpu_device *adev);
-int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
+int amdgpu_irq_add_id(struct amdgpu_device *adev,
+                     unsigned client_id, unsigned src_id,
                      struct amdgpu_irq_src *source);
 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
                         struct amdgpu_iv_entry *entry);
index c8cb964091257a9a58486a4754e016f5cc971a8a..11ccda83d767b0e416667c56e48cb72f1ecb9668 100644 (file)
@@ -6284,11 +6284,13 @@ static int ci_dpm_sw_init(void *handle)
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+       ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
+                               &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;
 
-       ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+       ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
+                               &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;
 
index 319b32cdea84cf9e92f59691df598436b306367e..c222a438dbeaebf9917a6c8baea829f25525d62d 100644 (file)
@@ -248,6 +248,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
        dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
        dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
+       entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
        entry->src_id = dw[0] & 0xff;
        entry->src_data = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
index 131f69b3f70e4b7080addb6ff8de52b4a68cfaf1..3eee569701d7febb1a344884df14eb7b3be3644d 100644 (file)
@@ -923,17 +923,20 @@ static int cik_sdma_sw_init(void *handle)
        }
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+                             &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+                             &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+                             &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
index fe7cbb24da7b835298cdda54f5b187b4dbf306a8..f1ccbe684469d9cdd61494c97772a64a5db607b8 100644 (file)
@@ -227,6 +227,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
        dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
        dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
+       entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
        entry->src_id = dw[0] & 0xff;
        entry->src_data = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
index d4452d8f76caa18d84936e4d5b644a12c2ea04df..fadc2d475524ea49c277bc49501500f9c6cc51d4 100644 (file)
@@ -2947,19 +2947,19 @@ static int dce_v10_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
-               r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
                if (r)
                        return r;
        }
 
        for (i = 8; i < 20; i += 2) {
-               r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
                if (r)
                        return r;
        }
 
        /* HPD hotplug */
-       r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
        if (r)
                return r;
 
index 5b24e89552ec4533f6b3ae6624d2a89063ef7aed..2462a4218df3d10191cfce1fa9c2c314d3abce49 100644 (file)
@@ -3007,19 +3007,19 @@ static int dce_v11_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
-               r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
                if (r)
                        return r;
        }
 
        for (i = 8; i < 20; i += 2) {
-               r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
                if (r)
                        return r;
        }
 
        /* HPD hotplug */
-       r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
        if (r)
                return r;
 
index 809aa94a0cc1f936a02c950898d840d2a6bd13a9..aae1d5959fe1d87b3e198662007dde8622a6d0fc 100644 (file)
@@ -2295,19 +2295,19 @@ static int dce_v6_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
-               r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
                if (r)
                        return r;
        }
 
        for (i = 8; i < 20; i += 2) {
-               r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
                if (r)
                        return r;
        }
 
        /* HPD hotplug */
-       r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
        if (r)
                return r;
 
index d2590d75aa1129d0b790b753da6e2c8163aaeccd..0382f2522389f5476986755251f800d58643d1d3 100644 (file)
@@ -2794,19 +2794,19 @@ static int dce_v8_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
-               r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
                if (r)
                        return r;
        }
 
        for (i = 8; i < 20; i += 2) {
-               r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+               r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
                if (r)
                        return r;
        }
 
        /* HPD hotplug */
-       r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
        if (r)
                return r;
 
index 1538fd119c9e4fe70b37feaa6b30e094b09328bc..788c0231d193ed5933fb22506e84c4ca20ee20d4 100644 (file)
@@ -464,7 +464,7 @@ static int dce_virtual_sw_init(void *handle)
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = amdgpu_irq_add_id(adev, 229, &adev->crtc_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 229, &adev->crtc_irq);
        if (r)
                return r;
 
index 1a1de6499517ce4411acc548ce970e3eba89b0df..7259dedb88b41d4fe948c6ee7a3666e5e451566e 100644 (file)
@@ -3242,15 +3242,15 @@ static int gfx_v6_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i, r;
 
-       r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
        if (r)
                return r;
 
index 1de2e5318b673c434ca3fbf04e964c6fa4e98beb..8a8bc2fe6f2e4c81265219bda0f3928f412e8e7c 100644 (file)
@@ -4669,17 +4669,19 @@ static int gfx_v7_0_sw_init(void *handle)
        int i, r;
 
        /* EOP Event */
-       r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
        if (r)
                return r;
 
        /* Privileged reg */
-       r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
+                             &adev->gfx.priv_reg_irq);
        if (r)
                return r;
 
        /* Privileged inst */
-       r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
+                             &adev->gfx.priv_inst_irq);
        if (r)
                return r;
 
index 03ff1399fbfaa3c4e8e399cce26c9d7509fdbb6b..91db997a8fa7fbe399e05f7132c491fec8bc3ad4 100644 (file)
@@ -2073,22 +2073,24 @@ static int gfx_v8_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* KIQ event */
-       r = amdgpu_irq_add_id(adev, 178, &adev->gfx.kiq.irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 178, &adev->gfx.kiq.irq);
        if (r)
                return r;
 
        /* EOP Event */
-       r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
        if (r)
                return r;
 
        /* Privileged reg */
-       r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
+                             &adev->gfx.priv_reg_irq);
        if (r)
                return r;
 
        /* Privileged inst */
-       r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
+                             &adev->gfx.priv_inst_irq);
        if (r)
                return r;
 
index 33284287cdf3930efb1f9cc61622ce2012f919b8..01ce49f160fc507fa6b13e8f13b68f01f5a9aaf2 100644 (file)
@@ -823,11 +823,11 @@ static int gmc_v6_0_sw_init(void *handle)
        int dma_bits;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
        if (r)
                return r;
 
index 13efb679dfa1bda349ac1b10e35fcf5b3314358a..4e4176403e5a6e43c9aacd666e0eb7ac445f4ca3 100644 (file)
@@ -969,11 +969,11 @@ static int gmc_v7_0_sw_init(void *handle)
                adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
        }
 
-       r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
        if (r)
                return r;
 
index 952ba1e02a77d7333e8e130a73ac5567447956e4..c737ada8a014dab80a7f3cf5bbe99be92f67c013 100644 (file)
@@ -981,11 +981,11 @@ static int gmc_v8_0_sw_init(void *handle)
                adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
        }
 
-       r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
        if (r)
                return r;
 
index ac21bb7bc0f335713fb86e093999870d92bc6eb3..5d5860cab469558936e8e63c48c086156589351e 100644 (file)
@@ -227,6 +227,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
        dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
        dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
+       entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
        entry->src_id = dw[0] & 0xff;
        entry->src_data = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
index 13f323745729a76c6555fa4e7b6bce1f4849cf69..79a52ad2c80df4728acf96ac8007e9afb0e45fc7 100644 (file)
@@ -2981,11 +2981,13 @@ static int kv_dpm_sw_init(void *handle)
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+       ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
+                               &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;
 
-       ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+       ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
+                               &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;
 
index 3164d61aaa9e8c6b9a15bbdc3ecae7cd2915f949..70a3dd13cb02db3170e9f8a50ada744e8753b3c3 100644 (file)
@@ -566,11 +566,11 @@ int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
 {
        int r;
 
-       r = amdgpu_irq_add_id(adev, 135, &adev->virt.rcv_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, 138, &adev->virt.ack_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
index a733c0f63bbaa06102d6d86364e8143594de5857..0b1aa946260845fdb1ac782955e416cf4e84e1e5 100644 (file)
@@ -921,17 +921,20 @@ static int sdma_v2_4_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+                             &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+                             &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+                             &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
index cafa3852143d951a0bc098b420c27b36dbd78b44..610bc3e4a72868cb56718e9ac5eb3b48c742d232 100644 (file)
@@ -1137,17 +1137,20 @@ static int sdma_v3_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+                             &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+                             &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+                             &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
index c4fb3f94c26f45a88603b3b5f5072066bec4ef12..d2edd3212344dcc121135dcda656b7ff745ab40c 100644 (file)
@@ -517,12 +517,12 @@ static int si_dma_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* DMA0 trap event */
-       r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* DMA1 trap event */
-       r = amdgpu_irq_add_id(adev, 244, &adev->sdma.trap_irq_1);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1);
        if (r)
                return r;
 
index eb84c2a6d951dd5de366e7a07cf731a3071f3ff5..7c1c5d127281cdbb1a970d8492344043fd61f7fc 100644 (file)
@@ -7700,11 +7700,11 @@ static int si_dpm_sw_init(void *handle)
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+       ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;
 
-       ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+       ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
        if (ret)
                return ret;
 
index 81f90800ba7305362cb9d21557f53d2a8174c3b8..c30d9ef1a42529c16ba03fc1a3ba0ee98bc139f3 100644 (file)
@@ -129,6 +129,7 @@ static void si_ih_decode_iv(struct amdgpu_device *adev,
        dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
        dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
+       entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
        entry->src_id = dw[0] & 0xff;
        entry->src_data = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
index 52b71ee587930980b8e9e360b784cc8e8fcf83ff..a6a2c2f486b23b32614eb82f9946bc23a032de40 100644 (file)
@@ -238,6 +238,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
        dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
        dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
+       entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
        entry->src_id = dw[0] & 0xff;
        entry->src_data = dw[1] & 0xfffffff;
        entry->ring_id = dw[2] & 0xff;
index 4bcb2f37cb7f2c1e4cf1949114a2f50047af77db..9a4129d881aa2e551255233d4c827006d2ba885d 100644 (file)
@@ -107,7 +107,7 @@ static int uvd_v4_2_sw_init(void *handle)
        int r;
 
        /* UVD TRAP */
-       r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
        if (r)
                return r;
 
index 35008c181363c890fdecb03556cf2b3896c2d9d8..e448f7d86bc09dfc8b751a122d0bd50e7a94feb5 100644 (file)
@@ -103,7 +103,7 @@ static int uvd_v5_0_sw_init(void *handle)
        int r;
 
        /* UVD TRAP */
-       r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
        if (r)
                return r;
 
index 46fe4980accc269f03ea7974598eeb0f3905440a..07aa2451aaebce6da897f12512886a3f00c4d1ac 100644 (file)
@@ -106,7 +106,7 @@ static int uvd_v6_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* UVD TRAP */
-       r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
        if (r)
                return r;
 
index 3433a73ae04bc9a8d35852b9471e7b0bfd5b5969..c7fd4b7ddbff2be6779a1a841990362ce2ade5ad 100644 (file)
@@ -430,7 +430,7 @@ static int vce_v2_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* VCE */
-       r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
        if (r)
                return r;
 
index 2c5f88cad8ac42355a976bc494714c8a9788ad08..684e53f81bdc26cad4b4b11846d4992e8e8564b3 100644 (file)
@@ -383,7 +383,7 @@ static int vce_v3_0_sw_init(void *handle)
        int r, i;
 
        /* VCE */
-       r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
        if (r)
                return r;
 
index ca4f6007a9b30661e1f7ffb6db0d1ee356b93c45..bc7446c1d22ee1d2f004ad28e0688ba71f32efdb 100644 (file)
@@ -66,7 +66,8 @@ typedef int (*cgs_irq_handler_func_t)(void *private_data,
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_add_irq_source_t)(struct cgs_device *cgs_device, unsigned src_id,
+typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned client_id,
+                                   unsigned src_id,
                                    unsigned num_types,
                                    cgs_irq_source_set_func_t set,
                                    cgs_irq_handler_func_t handler,
@@ -83,7 +84,7 @@ typedef int (*cgs_add_irq_source_t)(struct cgs_device *cgs_device, unsigned src_
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_irq_get_t)(struct cgs_device *cgs_device, unsigned src_id, unsigned type);
+typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned client_id, unsigned src_id, unsigned type);
 
 /**
  * cgs_irq_put() - Indicate IRQ source is no longer needed
@@ -98,7 +99,7 @@ typedef int (*cgs_irq_get_t)(struct cgs_device *cgs_device, unsigned src_id, uns
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_irq_put_t)(struct cgs_device *cgs_device, unsigned src_id, unsigned type);
+typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned client_id, unsigned src_id, unsigned type);
 
 struct cgs_os_ops {
        /* IRQ handling */
@@ -107,12 +108,12 @@ struct cgs_os_ops {
        cgs_irq_put_t irq_put;
 };
 
-#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
-       CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler,    \
+#define cgs_add_irq_source(dev,client_id,src_id,num_types,set,handler,private_data) \
+       CGS_OS_CALL(add_irq_source,dev,client_id,src_id,num_types,set,handler, \
                    private_data)
-#define cgs_irq_get(dev,src_id,type)           \
-       CGS_OS_CALL(irq_get,dev,src_id,type)
-#define cgs_irq_put(dev,src_id,type)           \
-       CGS_OS_CALL(irq_put,dev,src_id,type)
+#define cgs_irq_get(dev,client_id,src_id,type) \
+       CGS_OS_CALL(irq_get,dev,client_id,src_id,type)
+#define cgs_irq_put(dev,client_id,src_id,type) \
+       CGS_OS_CALL(irq_put,dev,client_id,src_id,type)
 
 #endif /* _CGS_LINUX_H */