	u32 disabled_rbs = 0;
	u32 enabled_rbs = 0;

+	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
		}
	}
	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
	mask = 1;
	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}
	adev->gfx.config.backend_enable_mask = enabled_rbs;
	adev->gfx.config.num_rbs = hweight32(enabled_rbs);
+	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < se_num; i++) {
		gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
		data = 0;
		/* ... per-SE RASTER_CONFIG RB mapping built from enabled_rbs (context elided) ... */
		WREG32(PA_SC_RASTER_CONFIG, data);
	}
	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
}
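All three hunks in this patch wrap the same critical section: GRBM_GFX_INDEX is a single shared selector for banked per-SE/per-SH registers, so a select/access/restore sequence must not be interleaved with another caller retargeting the selector. A minimal sketch of the pattern being protected (helper and mutex names as in the diff; SOME_BANKED_REG is a hypothetical placeholder, not a real register):

	/* Sketch only: serialize GRBM_GFX_INDEX programming. */
	mutex_lock(&adev->grbm_idx_mutex);
	gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);	/* target one SE/SH */
	data = RREG32(SOME_BANKED_REG);				/* hypothetical banked register */
	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);	/* restore broadcast */
	mutex_unlock(&adev->grbm_idx_mutex);

Without the mutex, a second thread could reprogram the selector between the select and the register access, silently reading or writing the wrong SE/SH bank.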
/*
static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev)
{
}
*/

static void gfx_v6_0_setup_spi(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_per_se, u32 cu_per_sh)
{
	int i, j, k;
	u32 data, mask;
	u32 active_cu = 0;

+	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
			/* ... per-CU SPI_STATIC_THREAD_MGMT_3 masking (context elided) ... */
		}
	}
	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
}
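For reference (not part of this diff), gfx_v6_0_select_se_sh() is the helper that programs GRBM_GFX_INDEX, and 0xffffffff means "broadcast" rather than a single index. A simplified sketch, assuming the SE_INDEX/SH_INDEX and *_BROADCAST_WRITES field macros from sid.h; the in-tree helper also handles the third, per-instance argument the same way:

	static void select_se_sh_sketch(struct amdgpu_device *adev,
					u32 se_num, u32 sh_num)
	{
		u32 data = INSTANCE_BROADCAST_WRITES;

		if (se_num == 0xffffffff && sh_num == 0xffffffff)
			data |= SE_BROADCAST_WRITES | SH_BROADCAST_WRITES;
		else if (se_num == 0xffffffff)
			data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
		else if (sh_num == 0xffffffff)
			data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
		else
			data |= SE_INDEX(se_num) | SH_INDEX(sh_num);

		WREG32(GRBM_GFX_INDEX, data);	/* retarget banked register accesses */
	}

This is why every locked section above ends with a select on (0xffffffff, 0xffffffff, 0xffffffff): it returns the GPU to broadcast mode before other code touches banked registers.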
static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
					 u32 se, u32 sh)
{
	u32 mask = 0, tmp, tmp1;
	int i;

+	mutex_lock(&adev->grbm_idx_mutex);
	gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
	tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
	tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);

	tmp &= 0xffff0000;