From: Andres Rodriguez
Date: Tue, 4 Apr 2017 21:18:28 +0000 (-0400)
Subject: drm/amdgpu: remove hardcoded queue_mask in PACKET3_SET_RESOURCES
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=de65513af1124b28f2a858bc19b71a54dd93824e;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

drm/amdgpu: remove hardcoded queue_mask in PACKET3_SET_RESOURCES

The assumption that we are only using the first pipe no longer holds.
Instead, calculate the queue_mask from the queue_bitmap.

Acked-by: Felix Kuehling
Signed-off-by: Andres Rodriguez
Signed-off-by: Alex Deucher
---

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 65c45ae67157..bd0b2310f1d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4734,8 +4734,24 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
 	uint32_t scratch, tmp = 0;
+	uint64_t queue_mask = 0;
 	int r, i;
 
+	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
+		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
+			continue;
+
+		/* This situation may be hit in the future if a new HW
+		 * generation exposes more than 64 queues. If so, the
+		 * definition of queue_mask needs updating */
+		if (WARN_ON(i > (sizeof(queue_mask)*8))) {
+			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
+			break;
+		}
+
+		queue_mask |= (1ull << i);
+	}
+
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
 	if (r) {
 		DRM_ERROR("Failed to get scratch reg (%d).\n", r);
@@ -4752,8 +4768,8 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
 	/* set resources */
 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
 	amdgpu_ring_write(kiq_ring, 0);	/* vmid_mask:0 queue_type:0 (KIQ) */
-	amdgpu_ring_write(kiq_ring, 0x000000FF);	/* queue mask lo */
-	amdgpu_ring_write(kiq_ring, 0);	/* queue mask hi */
+	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
+	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index db1d50e45985..155a13911023 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2608,8 +2608,24 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
 	uint32_t scratch, tmp = 0;
+	uint64_t queue_mask = 0;
 	int r, i;
 
+	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
+		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
+			continue;
+
+		/* This situation may be hit in the future if a new HW
+		 * generation exposes more than 64 queues. If so, the
+		 * definition of queue_mask needs updating */
+		if (WARN_ON(i > (sizeof(queue_mask)*8))) {
+			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
+			break;
+		}
+
+		queue_mask |= (1ull << i);
+	}
+
 	r = amdgpu_gfx_scratch_get(adev, &scratch);
 	if (r) {
 		DRM_ERROR("Failed to get scratch reg (%d).\n", r);
@@ -2628,8 +2644,8 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
 	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
 			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
-	amdgpu_ring_write(kiq_ring, 0x000000FF);	/* queue mask lo */
-	amdgpu_ring_write(kiq_ring, 0);	/* queue mask hi */
+	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
+	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
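
For illustration only, here is a minimal standalone sketch of the logic this patch introduces: build a 64-bit queue_mask from a queue bitmap and split it into the lo/hi dwords written into the PACKET3_SET_RESOURCES payload. MAX_QUEUES, the example bitmap value, and the test_bit()/lower_32_bits()/upper_32_bits() definitions below are userspace stand-ins for the kernel helpers, not part of the patch, and the WARN_ON guard for more than 64 queues is omitted for brevity.

    /* Standalone sketch (not kernel code): derive the KCQ queue mask from a
     * queue bitmap, then split it into the two dwords the KIQ ring expects. */
    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    #define MAX_QUEUES 64	/* stand-in for AMDGPU_MAX_COMPUTE_QUEUES */

    /* stand-ins for the kernel's test_bit()/lower_32_bits()/upper_32_bits() */
    static int test_bit(int nr, const uint64_t *bitmap)
    {
    	return (bitmap[nr / 64] >> (nr % 64)) & 1;
    }
    #define lower_32_bits(x) ((uint32_t)(x))
    #define upper_32_bits(x) ((uint32_t)((x) >> 32))

    int main(void)
    {
    	/* example bitmap: queues 0, 1, 8, 9, 40, 41 enabled */
    	uint64_t queue_bitmap[1] = { 0x0000030000000303ull };
    	uint64_t queue_mask = 0;
    	int i;

    	for (i = 0; i < MAX_QUEUES; ++i) {
    		if (!test_bit(i, queue_bitmap))
    			continue;
    		queue_mask |= 1ull << i;
    	}

    	printf("queue mask lo: 0x%08" PRIx32 "\n", lower_32_bits(queue_mask));
    	printf("queue mask hi: 0x%08" PRIx32 "\n", upper_32_bits(queue_mask));
    	return 0;
    }

With the example bitmap above this prints lo=0x00000303 and hi=0x00000300, i.e. the two dwords the KCQ enable path would emit instead of the previously hardcoded 0x000000FF/0.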