 			  ring->idx, r);
 		return r;
 	}
-	amdgpu_ring_write(ring, VCE_CMD_END);
+	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
 	amdgpu_ring_commit(ring);
 	for (i = 0; i < adev->usec_timeout; i++) {
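For context, the fragment above sits in the encode ring test helper in amdgpu_vcn.c: it writes a single VCN_ENC_CMD_END packet, commits the ring, then polls the read pointer until it moves or adev->usec_timeout expires. A rough sketch of the whole helper, reconstructed around the visible lines (the error string, DRM_UDELAY use, and -ETIMEDOUT tail are assumptions, not taken from this hunk):

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);	/* remember where rptr starts */
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);	/* minimal packet: END only */
	amdgpu_ring_commit(ring);

	/* The packet was consumed once rptr advances past its old value. */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)	/* assumed timeout handling */
		r = -ETIMEDOUT;

	return r;
}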
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
-	int r;
+	int i, r;
 	r = vcn_v1_0_start(adev);
 	if (r)
 		goto done;
 	}
+	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+		ring = &adev->vcn.ring_enc[i];
+		ring->ready = true;
+		r = amdgpu_ring_test_ring(ring);
+		if (r) {
+			ring->ready = false;
+			goto done;
+		}
+	}
+
 done:
 	if (!r)
-		DRM_INFO("VCN decode initialized successfully.\n");
+		DRM_INFO("VCN decode and encode initialized successfully.\n");
 	return r;
 }
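Note that this excerpt elides a few context lines: between the `goto done;` and the lone `}` above sits the existing decode-ring test that the old DRM_INFO message refers to, and the new encode loop simply repeats that pattern for each encode ring. In upstream vcn_v1_0.c the elided block reads roughly as follows (a reconstruction, not part of this diff):

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);	/* ring still points at vcn.ring_dec here */
	if (r) {
		ring->ready = false;
		goto done;
	}

Clearing ring->ready again on failure is what keeps later submissions away from a ring whose test never completed.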
 	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
 	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
 	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
+	.test_ring = amdgpu_vcn_enc_ring_test_ring,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.insert_end = vcn_v1_0_enc_ring_insert_end,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
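With .test_ring filled in, the amdgpu_ring_test_ring() calls added to hw_init above reach the encode helper through the ring's funcs table; at the time this was a thin dispatch macro in amdgpu.h, approximately:

/* Approximate shape of the dispatch (illustration only, not from this patch). */
#define amdgpu_ring_test_ring(r) ((r)->funcs->test_ring((r)))

Without this table entry, running the ring test on an encode ring would call through a NULL callback, so the funcs change and the hw_init change belong together.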