.process = cgs_process_irq,
};
-static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id,
+static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device,
+ unsigned client_id,
+ unsigned src_id,
unsigned num_types,
cgs_irq_source_set_func_t set,
cgs_irq_handler_func_t handler,
irq_params->handler = handler;
irq_params->private_data = private_data;
source->data = (void *)irq_params;
- ret = amdgpu_irq_add_id(adev, src_id, source);
+ ret = amdgpu_irq_add_id(adev, client_id, src_id, source);
if (ret) {
kfree(irq_params);
kfree(source);
return ret;
}
-static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
+static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned client_id,
+ unsigned src_id, unsigned type)
{
CGS_FUNC_ADEV;
- return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
+
+ if (!adev->irq.client[client_id].sources)
+ return -EINVAL;
+
+ return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type);
}
-static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
+static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned client_id,
+ unsigned src_id, unsigned type)
{
CGS_FUNC_ADEV;
- return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
+
+ if (!adev->irq.client[client_id].sources)
+ return -EINVAL;
+
+ return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type);
}
static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
struct amdgpu_device;
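+/*
+ * IH blocks that do not report a client id in their IV entries tag each
+ * decoded entry with the legacy client id; AMDGPU_IH_CLIENTID_MAX bounds
+ * the per-device client table.
+ */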
+#define AMDGPU_IH_CLIENTID_LEGACY 0
+
+#define AMDGPU_IH_CLIENTID_MAX 0x1f
+
/*
* R6xx+ IH ring
*/
};
struct amdgpu_iv_entry {
+ unsigned client_id;
unsigned src_id;
unsigned src_data;
unsigned ring_id;
unsigned vm_id;
+ unsigned vm_id_src;
unsigned pas_id;
const uint32_t *iv_entry;
};
static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
unsigned long irqflags;
- unsigned i, j;
+ unsigned i, j, k;
int r;
spin_lock_irqsave(&adev->irq.lock, irqflags);
- for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
- struct amdgpu_irq_src *src = adev->irq.sources[i];
-
- if (!src || !src->funcs->set || !src->num_types)
+ for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+ if (!adev->irq.client[i].sources)
continue;
- for (j = 0; j < src->num_types; ++j) {
- atomic_set(&src->enabled_types[j], 0);
- r = src->funcs->set(adev, src, j,
- AMDGPU_IRQ_STATE_DISABLE);
- if (r)
- DRM_ERROR("error disabling interrupt (%d)\n",
- r);
+ for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+ struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+
+ if (!src || !src->funcs->set || !src->num_types)
+ continue;
+
+ for (k = 0; k < src->num_types; ++k) {
+ atomic_set(&src->enabled_types[k], 0);
+ r = src->funcs->set(adev, src, k,
+ AMDGPU_IRQ_STATE_DISABLE);
+ if (r)
+ DRM_ERROR("error disabling interrupt (%d)\n",
+ r);
+ }
}
}
spin_unlock_irqrestore(&adev->irq.lock, irqflags);
*/
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
- unsigned i;
+ unsigned i, j;
drm_vblank_cleanup(adev->ddev);
if (adev->irq.installed) {
cancel_work_sync(&adev->reset_work);
}
- for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
- struct amdgpu_irq_src *src = adev->irq.sources[i];
-
- if (!src)
+ for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+ if (!adev->irq.client[i].sources)
continue;
- kfree(src->enabled_types);
- src->enabled_types = NULL;
- if (src->data) {
- kfree(src->data);
- kfree(src);
- adev->irq.sources[i] = NULL;
+ for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+ struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+
+ if (!src)
+ continue;
+
+ kfree(src->enabled_types);
+ src->enabled_types = NULL;
+ if (src->data) {
+ kfree(src->data);
+ kfree(src);
+ adev->irq.client[i].sources[j] = NULL;
+ }
}
+ kfree(adev->irq.client[i].sources);
}
}
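+ * @client_id: client id for this irq source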
* @source: irq source
*
*/
-int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
+int amdgpu_irq_add_id(struct amdgpu_device *adev,
+ unsigned client_id, unsigned src_id,
struct amdgpu_irq_src *source)
{
- if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
+ if (client_id >= AMDGPU_IH_CLIENTID_MAX)
return -EINVAL;
- if (adev->irq.sources[src_id] != NULL)
+ if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
return -EINVAL;
if (!source->funcs)
return -EINVAL;
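+ /* allocate this client's source table on first registration */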
+ if (!adev->irq.client[client_id].sources) {
+ adev->irq.client[client_id].sources = kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
+ sizeof(struct amdgpu_irq_src *),
+ GFP_KERNEL);
+ if (!adev->irq.client[client_id].sources)
+ return -ENOMEM;
+ }
+
+ if (adev->irq.client[client_id].sources[src_id] != NULL)
+ return -EINVAL;
+
if (source->num_types && !source->enabled_types) {
	atomic_t *types;

	types = kcalloc(source->num_types, sizeof(atomic_t),
			GFP_KERNEL);
	if (!types)
		return -ENOMEM;

	source->enabled_types = types;
}
- adev->irq.sources[src_id] = source;
-
+ adev->irq.client[client_id].sources[src_id] = source;
return 0;
}
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
+ unsigned client_id = entry->client_id;
unsigned src_id = entry->src_id;
struct amdgpu_irq_src *src;
int r;
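+ /* client_id is filled in by the IH decode step; legacy IH blocks report AMDGPU_IH_CLIENTID_LEGACY */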
+ if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
+ DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
+ return;
+ }
+
if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
return;
if (adev->irq.virq[src_id]) {
generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
} else {
- src = adev->irq.sources[src_id];
+ if (!adev->irq.client[client_id].sources) {
+ DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
+ return;
+ }
+
+ src = adev->irq.client[client_id].sources[src_id];
if (!src) {
DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
return;
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
- int i, j;
- for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) {
- struct amdgpu_irq_src *src = adev->irq.sources[i];
- if (!src)
+ int i, j, k;
+
+ for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+ if (!adev->irq.client[i].sources)
continue;
- for (j = 0; j < src->num_types; j++)
- amdgpu_irq_update(adev, src, j);
+
+ for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+ struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+
+ if (!src)
+ continue;
+ for (k = 0; k < src->num_types; k++)
+ amdgpu_irq_update(adev, src, k);
+ }
}
}
#include "amdgpu_ih.h"
#define AMDGPU_MAX_IRQ_SRC_ID 0x100
+#define AMDGPU_MAX_IRQ_CLIENT_ID 0x100
struct amdgpu_device;
struct amdgpu_iv_entry;
void *data;
};
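+/* per-client table of registered interrupt sources, allocated on first use */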
+struct amdgpu_irq_client {
+ struct amdgpu_irq_src **sources;
+};
+
/* provided by interrupt generating IP blocks */
struct amdgpu_irq_src_funcs {
int (*set)(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
bool installed;
spinlock_t lock;
/* interrupt sources */
- struct amdgpu_irq_src *sources[AMDGPU_MAX_IRQ_SRC_ID];
+ struct amdgpu_irq_client client[AMDGPU_IH_CLIENTID_MAX];
/* status, etc. */
bool msi_enabled; /* msi enabled */
int amdgpu_irq_init(struct amdgpu_device *adev);
void amdgpu_irq_fini(struct amdgpu_device *adev);
-int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
+int amdgpu_irq_add_id(struct amdgpu_device *adev,
+ unsigned client_id, unsigned src_id,
struct amdgpu_irq_src *source);
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
+ &adev->pm.dpm.thermal.irq);
if (ret)
return ret;
- ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
+ &adev->pm.dpm.thermal.irq);
if (ret)
return ret;
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
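+ /* this IH block's IV entries carry no client id, so tag them as legacy */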
+ entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
}
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+ &adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+ &adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+ &adev->sdma.illegal_inst_irq);
if (r)
return r;
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+ entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
for (i = 8; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
if (r)
return r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
for (i = 8; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
if (r)
return r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
for (i = 8; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
if (r)
return r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
for (i = 8; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
if (r)
return r;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_irq_add_id(adev, 229, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 229, &adev->crtc_irq);
if (r)
return r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
- r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
if (r)
return r;
int i, r;
/* EOP Event */
- r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
+ &adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
+ &adev->gfx.priv_inst_irq);
if (r)
return r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* KIQ event */
- r = amdgpu_irq_add_id(adev, 178, &adev->gfx.kiq.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 178, &adev->gfx.kiq.irq);
if (r)
return r;
/* EOP Event */
- r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
+ &adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
+ &adev->gfx.priv_inst_irq);
if (r)
return r;
int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
if (r)
return r;
adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
if (r)
return r;
adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
if (r)
return r;
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+ entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
+ &adev->pm.dpm.thermal.irq);
if (ret)
return ret;
- ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
+ &adev->pm.dpm.thermal.irq);
if (ret)
return ret;
{
int r;
- r = amdgpu_irq_add_id(adev, 135, &adev->virt.rcv_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, 138, &adev->virt.ack_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
if (r) {
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
return r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+ &adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+ &adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+ &adev->sdma.illegal_inst_irq);
if (r)
return r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+ &adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+ &adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+ &adev->sdma.illegal_inst_irq);
if (r)
return r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* DMA0 trap event */
- r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq);
if (r)
return r;
/* DMA1 trap event */
- r = amdgpu_irq_add_id(adev, 244, &adev->sdma.trap_irq_1);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1);
if (r)
return r;
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
if (ret)
return ret;
- ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
if (ret)
return ret;
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+ entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+ entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
int r;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
if (r)
return r;
int r;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
if (r)
return r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
if (r)
return r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCE */
- r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
if (r)
return r;
int r, i;
/* VCE */
- r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
if (r)
return r;
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_add_irq_source_t)(struct cgs_device *cgs_device, unsigned src_id,
+typedef int (*cgs_add_irq_source_t)(struct cgs_device *cgs_device, unsigned client_id,
+ unsigned src_id,
unsigned num_types,
cgs_irq_source_set_func_t set,
cgs_irq_handler_func_t handler,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_irq_get_t)(struct cgs_device *cgs_device, unsigned src_id, unsigned type);
+typedef int (*cgs_irq_get_t)(struct cgs_device *cgs_device, unsigned client_id, unsigned src_id, unsigned type);
/**
* cgs_irq_put() - Indicate IRQ source is no longer needed
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_irq_put_t)(struct cgs_device *cgs_device, unsigned src_id, unsigned type);
+typedef int (*cgs_irq_put_t)(struct cgs_device *cgs_device, unsigned client_id, unsigned src_id, unsigned type);
struct cgs_os_ops {
/* IRQ handling */
cgs_irq_put_t irq_put;
};
-#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
- CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \
+#define cgs_add_irq_source(dev,client_id,src_id,num_types,set,handler,private_data) \
+ CGS_OS_CALL(add_irq_source,dev,client_id,src_id,num_types,set,handler, \
private_data)
-#define cgs_irq_get(dev,src_id,type) \
- CGS_OS_CALL(irq_get,dev,src_id,type)
-#define cgs_irq_put(dev,src_id,type) \
- CGS_OS_CALL(irq_put,dev,src_id,type)
+#define cgs_irq_get(dev,client_id,src_id,type) \
+ CGS_OS_CALL(irq_get,dev,client_id,src_id,type)
+#define cgs_irq_put(dev,client_id,src_id,type) \
+ CGS_OS_CALL(irq_put,dev,client_id,src_id,type)
#endif /* _CGS_LINUX_H */