id->oa_size != job->oa_size);
int r;
- if (ring->funcs->emit_pipeline_sync && (
- job->vm_needs_flush || gds_switch_needed ||
- amdgpu_vm_ring_has_compute_vm_bug(ring)))
- amdgpu_ring_emit_pipeline_sync(ring);
+ if (job->vm_needs_flush || gds_switch_needed ||
+ amdgpu_vm_is_gpu_reset(adev, id) ||
+ amdgpu_vm_ring_has_compute_vm_bug(ring)) {
+ unsigned patch_offset = 0;
- if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
- amdgpu_vm_is_gpu_reset(adev, id))) {
- struct dma_fence *fence;
- u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
+ if (ring->funcs->init_cond_exec)
+ patch_offset = amdgpu_ring_init_cond_exec(ring);
- trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
- amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
+ if (ring->funcs->emit_pipeline_sync &&
+ (job->vm_needs_flush || gds_switch_needed ||
+ amdgpu_vm_ring_has_compute_vm_bug(ring)))
+ amdgpu_ring_emit_pipeline_sync(ring);
- r = amdgpu_fence_emit(ring, &fence);
- if (r)
- return r;
+ if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
+ amdgpu_vm_is_gpu_reset(adev, id))) {
+ struct dma_fence *fence;
+ u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
- mutex_lock(&adev->vm_manager.lock);
- dma_fence_put(id->last_flush);
- id->last_flush = fence;
- mutex_unlock(&adev->vm_manager.lock);
- }
+ trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
+ amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
- if (gds_switch_needed) {
- id->gds_base = job->gds_base;
- id->gds_size = job->gds_size;
- id->gws_base = job->gws_base;
- id->gws_size = job->gws_size;
- id->oa_base = job->oa_base;
- id->oa_size = job->oa_size;
- amdgpu_ring_emit_gds_switch(ring, job->vm_id,
- job->gds_base, job->gds_size,
- job->gws_base, job->gws_size,
- job->oa_base, job->oa_size);
- }
+ r = amdgpu_fence_emit(ring, &fence);
+ if (r)
+ return r;
+ mutex_lock(&adev->vm_manager.lock);
+ dma_fence_put(id->last_flush);
+ id->last_flush = fence;
+ mutex_unlock(&adev->vm_manager.lock);
+ }
+
+ if (gds_switch_needed) {
+ id->gds_base = job->gds_base;
+ id->gds_size = job->gds_size;
+ id->gws_base = job->gws_base;
+ id->gws_size = job->gws_size;
+ id->oa_base = job->oa_base;
+ id->oa_size = job->oa_size;
+ amdgpu_ring_emit_gds_switch(ring, job->vm_id,
+ job->gds_base, job->gds_size,
+ job->gws_base, job->gws_size,
+ job->oa_base, job->oa_size);
+ }
+
+ if (ring->funcs->patch_cond_exec)
+ amdgpu_ring_patch_cond_exec(ring, patch_offset);
+
+ /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
+ if (ring->funcs->emit_switch_buffer) {
+ amdgpu_ring_emit_switch_buffer(ring);
+ amdgpu_ring_emit_switch_buffer(ring);
+ }
+ }
return 0;
}
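
For reference, the COND_EXEC bracket used above works in two steps: init_cond_exec reserves a placeholder dword that the CP will read as "skip this many dwords if the condition fails", and patch_cond_exec overwrites that placeholder once the size of the skippable region is known. Below is a minimal sketch of such a pair, assuming ring bookkeeping fields like cond_exe_gpu_addr and buf_mask and a 0x55aa55aa placeholder; the function names and details are illustrative, not the exact gfx_v8_0/gfx_v9_0 callbacks.

/* Illustrative sketch of a COND_EXEC init/patch pair; not the driver's
 * actual implementation.
 */
static unsigned example_ring_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned offs;

	/* COND_EXEC: if the dword at cond_exe_gpu_addr reads back as zero,
	 * the CP discards the next N dwords.  N is not known yet, so park a
	 * recognizable placeholder and remember where it lives.
	 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0);			/* reference value */
	offs = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa);		/* placeholder count */
	return offs;
}

static void example_ring_patch_cond_exec(struct amdgpu_ring *ring,
					 unsigned offs)
{
	unsigned cur = ring->wptr & ring->buf_mask;

	/* Everything emitted since init_cond_exec is now measurable, so
	 * replace the placeholder with the skip length, taking a possible
	 * wrap of the write pointer into account.  The exact off-by-one
	 * semantics depend on how the CP counts, so treat this as schematic.
	 */
	if (cur > offs)
		ring->ring[offs] = cur - offs;
	else
		ring->ring[offs] = (ring->ring_size >> 2) - offs + cur;
}

This is why amdgpu_vm_flush only has to carry patch_offset between the two callbacks; whether the bracketed packets actually run is decided by the CP at execution time, not at submission time.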
/* sync PFP to ME, otherwise we might get invalid PFP reads */
amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
amdgpu_ring_write(ring, 0x0);
- /* GFX8 emits 128 dw nop to prevent CE access VM before vm_flush finish */
- amdgpu_ring_insert_nop(ring, 128);
}
}
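
The 128-dw NOP padding deleted here was, per its own comment, there to keep the CE from touching the VM before the flush finished; the double SWITCH_BUFFER now emitted at the end of amdgpu_vm_flush, deliberately kept outside the COND_EXEC bracket per the comment above, is meant to provide that ordering instead. The packet itself is tiny: a SWITCH_BUFFER is a two-dword PACKET3, which is where the "2" and "4 (double)" entries in the frame-size tables below come from. A sketch of what an emit_switch_buffer callback boils down to (the function name is illustrative, not the exact driver symbol):

/* Sketch only: a SWITCH_BUFFER is a single two-dword PACKET3. */
static void example_ring_emit_switch_buffer(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}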
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
- .emit_frame_size =
- 20 + /* gfx_v8_0_ring_emit_gds_switch */
- 7 + /* gfx_v8_0_ring_emit_hdp_flush */
- 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
- 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
- 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
- 128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
- 2 + /* gfx_v8_ring_emit_sb */
- 3 + 4 + 29, /* gfx_v8_ring_emit_cntxcntl including vgt flush/meta-data */
+ .emit_frame_size = /* maximum 215 dw if 16 IBs are counted in */
+ 5 + /* COND_EXEC */
+ 7 + /* PIPELINE_SYNC */
+ 19 + /* VM_FLUSH */
+ 8 + /* FENCE for VM_FLUSH */
+ 20 + /* GDS switch */
+ 4 + /* double SWITCH_BUFFER,
+        the first COND_EXEC jumps to the place just
+        prior to this double SWITCH_BUFFER */
+ 5 + /* COND_EXEC */
+ 7 + /* HDP_flush */
+ 4 + /* VGT_flush */
+ 14 + /* CE_META */
+ 31 + /* DE_META */
+ 3 + /* CNTX_CTRL */
+ 5 + /* HDP_INVL */
+ 8 + 8 + /* FENCE x2 */
+ 2, /* SWITCH_BUFFER */
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
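
Checking the new gfx_v8_0 budget: the fixed terms above sum to 150 dw, and with emit_ib_size = 4 a submission carrying 16 IBs adds another 64 dw, which lands at the ~215 dw maximum quoted in the comment. A throwaway arithmetic check with the constants copied from the table (ordinary userspace C, not driver code):

#include <assert.h>

int main(void)
{
	/* fixed per-frame overhead from the gfx_v8_0 emit_frame_size table */
	int fixed = 5 + 7 + 19 + 8 + 20 + 4 + 5 + 7 + 4 +
		    14 + 31 + 3 + 5 + 8 + 8 + 2;	/* 150 dw */
	/* plus 16 IBs at emit_ib_size = 4 dw each */
	int worst_case = fixed + 16 * 4;		/* 214 dw */

	assert(fixed == 150);
	assert(worst_case == 214);	/* in line with the ~215 dw comment */
	return 0;
}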
/* sync PFP to ME, otherwise we might get invalid PFP reads */
amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
amdgpu_ring_write(ring, 0x0);
- /* Emits 128 dw nop to prevent CE access VM before vm_flush finish */
- amdgpu_ring_insert_nop(ring, 128);
}
}
.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
- .emit_frame_size =
- 20 + /* gfx_v9_0_ring_emit_gds_switch */
- 7 + /* gfx_v9_0_ring_emit_hdp_flush */
- 5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
- 8 + 8 + 8 +/* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
- 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
- 128 + 66 + /* gfx_v9_0_ring_emit_vm_flush */
- 2 + /* gfx_v9_ring_emit_sb */
- 3, /* gfx_v9_ring_emit_cntxcntl */
+ .emit_frame_size = /* 242 dw maximum if 16 IBs are counted in */
+ 5 + /* COND_EXEC */
+ 7 + /* PIPELINE_SYNC */
+ 46 + /* VM_FLUSH */
+ 8 + /* FENCE for VM_FLUSH */
+ 20 + /* GDS switch */
+ 4 + /* double SWITCH_BUFFER,
+        the first COND_EXEC jumps to the place just
+        prior to this double SWITCH_BUFFER */
+ 5 + /* COND_EXEC */
+ 7 + /* HDP_flush */
+ 4 + /* VGT_flush */
+ 14 + /* CE_META */
+ 31 + /* DE_META */
+ 3 + /* CNTX_CTRL */
+ 5 + /* HDP_INVL */
+ 8 + 8 + /* FENCE x2 */
+ 2, /* SWITCH_BUFFER */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
.emit_fence = gfx_v9_0_ring_emit_fence,
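
The gfx_v9_0 table differs only in the VM_FLUSH term (46 dw instead of 19); the same arithmetic gives 177 fixed dw, or 241 dw with 16 IBs, roughly the 242 dw maximum noted in the comment:

#include <assert.h>

int main(void)
{
	/* fixed per-frame overhead from the gfx_v9_0 emit_frame_size table */
	int fixed = 5 + 7 + 46 + 8 + 20 + 4 + 5 + 7 + 4 +
		    14 + 31 + 3 + 5 + 8 + 8 + 2;	/* 177 dw */

	assert(fixed == 177);
	assert(fixed + 16 * 4 == 241);	/* in line with the ~242 dw comment */
	return 0;
}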