adev->accel_working = true;
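+	/* detect the compute VM bug once at init and cache it in each ring */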
+ amdgpu_vm_check_compute_bug(adev);
+
/* Initialize the buffer migration limit. */
if (amdgpu_moverate >= 0)
max_MBps = amdgpu_moverate;
ring->funcs->end_use(ring);
}
-/**
- * amdgpu_ring_check_compute_vm_bug - check whether this ring has compute vm bug
- *
- * @adev: amdgpu_device pointer
- * @ring: amdgpu_ring structure holding ring information
- */
-static void amdgpu_ring_check_compute_vm_bug(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- const struct amdgpu_ip_block *ip_block;
-
- ring->has_compute_vm_bug = false;
-
- if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
- /* only compute rings */
- return;
-
- ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
- if (!ip_block)
- return;
-
- /* Compute ring has a VM bug for GFX version < 7.
- And compute ring has a VM bug for GFX 8 MEC firmware version < 673.*/
- if (ip_block->version->major <= 7) {
- ring->has_compute_vm_bug = true;
- } else if (ip_block->version->major == 8)
- if (adev->gfx.mec_fw_version < 673)
- ring->has_compute_vm_bug = true;
-}
-
/**
* amdgpu_ring_init - init driver ring struct.
*
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
- amdgpu_ring_check_compute_vm_bug(adev, ring);
-
return 0;
}
}
-static inline bool amdgpu_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
-{
- return ring->has_compute_vm_bug;
-}
-
#endif
return r;
}
+/**
+ * amdgpu_vm_check_compute_bug - check whether the ASIC has the compute VM bug
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Determine whether the compute rings are affected by the VM bug and cache
+ * the result in each ring's has_compute_vm_bug flag.
+ */
+void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
+{
+ const struct amdgpu_ip_block *ip_block;
+ bool has_compute_vm_bug;
+ struct amdgpu_ring *ring;
+ int i;
+
+ has_compute_vm_bug = false;
+
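+	/* the bug depends only on the GFX IP version and MEC firmware version */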
+ ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (ip_block) {
+		/* Compute has a VM bug for GFX version <= 7.
+		 * Compute also has a VM bug for GFX 8 with MEC firmware
+		 * version < 673.
+		 */
+		if (ip_block->version->major <= 7)
+			has_compute_vm_bug = true;
+		else if (ip_block->version->major == 8 &&
+			 adev->gfx.mec_fw_version < 673)
+			has_compute_vm_bug = true;
+ }
+
+ for (i = 0; i < adev->num_rings; i++) {
+ ring = adev->rings[i];
+		/* only compute rings are affected by the bug */
+		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+			ring->has_compute_vm_bug = has_compute_vm_bug;
+		else
+			ring->has_compute_vm_bug = false;
+ }
+}
+
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job)
{
struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vm_id *id;
bool gds_switch_needed;
- bool vm_flush_needed = job->vm_needs_flush ||
- amdgpu_ring_has_compute_vm_bug(ring);
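+	/* rings with the compute VM bug always need a VM flush */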
+ bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
if (job->vm_id == 0)
return false;
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job);
+void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
#endif