bool (*read_disabled_bios)(struct amdgpu_device *adev);
bool (*read_bios_from_rom)(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes);
+ void (*detect_hw_virtualization)(struct amdgpu_device *adev);
int (*read_register)(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value);
void (*set_vga_state)(struct amdgpu_device *adev, bool state);
/* MM block clocks */
int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
- /* query virtual capabilities */
- u32 (*get_virtual_caps)(struct amdgpu_device *adev);
/* static power management */
int (*get_pcie_lanes)(struct amdgpu_device *adev);
void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
+#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is SR-IOV ready */
+#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* SR-IOV is enabled on this GPU */
+#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
+#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through to the VM */
/* GPU virtualization */
-#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0)
-#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1)
struct amdgpu_virtualization {
- bool supports_sr_iov;
- bool is_virtual;
- u32 caps;
+ uint32_t virtual_caps;
};
+#define amdgpu_sriov_enabled(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
+
+#define amdgpu_sriov_vf(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF)
+
+#define amdgpu_sriov_bios(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)
+
+#define amdgpu_passthrough(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE)
+
+static inline bool is_virtual_machine(void)
+{
+#ifdef CONFIG_X86
+ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+ return false;
+#endif
+}
+
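A minimal usage sketch of the new capability helpers (illustration only, not part of the patch; amdgpu_example_needs_driver_post() is a hypothetical function, while amdgpu_card_posted() and the macros above exist in the driver):

static bool amdgpu_example_needs_driver_post(struct amdgpu_device *adev)
{
	/* a VF never posts the card itself; the host vBIOS handles that */
	if (amdgpu_sriov_vf(adev))
		return false;

	/* a passed-through GPU is re-posted even if it looks posted */
	return !amdgpu_card_posted(adev) || amdgpu_passthrough(adev);
}

This mirrors the posting condition used in amdgpu_device_init() further down.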
/*
* Core structure, functions and helpers.
*/
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
-#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
+#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
bool always_indirect)
{
trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-
+
if ((reg * 4) < adev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
else {
return 0;
}
-static bool amdgpu_device_is_virtual(void)
+static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
-#ifdef CONFIG_X86
- return boot_cpu_has(X86_FEATURE_HYPERVISOR);
-#else
- return false;
-#endif
+ if (amdgpu_atombios_has_gpu_virtualization_table(adev))
+ adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
}
/**
goto failed;
}
- /* See if the asic supports SR-IOV */
- adev->virtualization.supports_sr_iov =
- amdgpu_atombios_has_gpu_virtualization_table(adev);
-
- /* Check if we are executing in a virtualized environment */
- adev->virtualization.is_virtual = amdgpu_device_is_virtual();
- adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+ /* detect if we have an SR-IOV vBIOS */
+ amdgpu_device_detect_sriov_bios(adev);
/* Post card if necessary */
- if (!amdgpu_card_posted(adev) ||
- (adev->virtualization.is_virtual &&
- !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
+ if (!amdgpu_sriov_vf(adev) &&
+ (!amdgpu_card_posted(adev) || amdgpu_passthrough(adev))) {
if (!adev->bios) {
dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
r = -EINVAL;
goto failed;
}
DRM_INFO("GPU not posted. posting now...\n");
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (r) {
+ dev_err(adev->dev, "gpu post error!\n");
+ goto failed;
+ }
+ } else {
+ DRM_INFO("GPU post is not needed\n");
}
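Equivalently, the posting policy introduced above reduces to a single condition (restated here for readability, illustration only):

	!amdgpu_sriov_vf(adev) && (!amdgpu_card_posted(adev) || amdgpu_passthrough(adev))

i.e. a VF never runs the vBIOS POST from the guest (the host side already did it during asic_init), while a passed-through GPU is posted even when it appears posted, presumably because the state left by a previous guest cannot be relied upon.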
/* Initialize clocks */
/* if we are running in a VM, make sure the device
* is torn down properly on reboot/shutdown
*/
- if (adev->virtualization.is_virtual)
+ if (amdgpu_passthrough(adev))
amdgpu_pci_remove(pdev);
}
return true;
}
-static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
-{
- /* CIK does not support SR-IOV */
- return 0;
-}
-
static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS, false},
{mmGB_ADDR_CONFIG, false},
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
+static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
+{
+ if (is_virtual_machine()) /* passthrough mode */
+ adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
{
/* ORDER MATTERS! */
{
.read_disabled_bios = &cik_read_disabled_bios,
.read_bios_from_rom = &cik_read_bios_from_rom,
+ .detect_hw_virtualization = &cik_detect_hw_virtualization,
.read_register = &cik_read_register,
.reset = &cik_asic_reset,
.set_vga_state = &cik_vga_set_state,
.get_xclk = &cik_get_xclk,
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
- .get_virtual_caps = &cik_get_virtual_caps,
};
static int cik_common_early_init(void *handle)
/* Skip SMC ucode loading on SR-IOV capable boards.
* vbios does this for us in asic_init in that case.
*/
- if (adev->virtualization.supports_sr_iov)
+ if (amdgpu_sriov_bios(adev))
return 0;
hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
/* Skip MC ucode loading on SR-IOV capable boards.
* vbios does this for us in asic_init in that case.
+ * Skip MC ucode loading on VF, because the hypervisor will do that
+ * for this adapter.
*/
- if (adev->virtualization.supports_sr_iov)
+ if (amdgpu_sriov_bios(adev))
return 0;
hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
/* Skip SMC ucode loading on SR-IOV capable boards.
* vbios does this for us in asic_init in that case.
*/
- if (adev->virtualization.supports_sr_iov)
+ if (amdgpu_sriov_bios(adev))
return 0;
hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
-static u32 si_get_virtual_caps(struct amdgpu_device *adev)
-{
- /* SI does not support SR-IOV */
- return 0;
-}
-
static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{GRBM_STATUS, false},
{GB_ADDR_CONFIG, false},
return 0;
}
+static void si_detect_hw_virtualization(struct amdgpu_device *adev)
+{
+ if (is_virtual_machine()) /* passthrough mode */
+ adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
+ .detect_hw_virtualization = &si_detect_hw_virtualization,
.read_register = &si_read_register,
.reset = &si_asic_reset,
.set_vga_state = &si_vga_set_state,
.get_xclk = &si_get_xclk,
.set_uvd_clocks = &si_set_uvd_clocks,
.set_vce_clocks = NULL,
- .get_virtual_caps = &si_get_virtual_caps,
};
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
/* Skip SMC ucode loading on SR-IOV capable boards.
* vbios does this for us in asic_init in that case.
*/
- if (adev->virtualization.supports_sr_iov)
+ if (amdgpu_sriov_bios(adev))
return 0;
hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
return true;
}
-static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
- u32 caps = 0;
- u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+ uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+ /* bit 0: 0 means PF, 1 means VF */
+ /* bit 31: 0 means IOV is disabled, 1 means enabled */
+ if (reg & 1)
+ adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;
- if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
- caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
+ if (reg & 0x80000000)
+ adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
- if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
- caps |= AMDGPU_VIRT_CAPS_IS_VF;
-
- return caps;
+ if (reg == 0) {
+ if (is_virtual_machine()) /* passthrough mode excludes SR-IOV mode */
+ adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+ }
}
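For reference, the raw masks above correspond to the named register fields the removed code read with REG_GET_FIELD; an equivalent formulation (illustration only, vi_detect_hw_virtualization_alt() is a hypothetical name):

static void vi_detect_hw_virtualization_alt(struct amdgpu_device *adev)
{
	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);

	/* bit 0 (FUNC_IDENTIFIER): set on a virtual function */
	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	/* bit 31 (IOV_ENABLE): set when SR-IOV is enabled on this GPU */
	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	/* reg == 0: no SR-IOV; the CPUID hypervisor bit then indicates passthrough */
	if (reg == 0 && is_virtual_machine())
		adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
}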
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
{
.read_disabled_bios = &vi_read_disabled_bios,
.read_bios_from_rom = &vi_read_bios_from_rom,
+ .detect_hw_virtualization = &vi_detect_hw_virtualization,
.read_register = &vi_read_register,
.reset = &vi_asic_reset,
.set_vga_state = &vi_vga_set_state,
.get_xclk = &vi_get_xclk,
.set_uvd_clocks = &vi_set_uvd_clocks,
.set_vce_clocks = &vi_set_vce_clocks,
- .get_virtual_caps = &vi_get_virtual_caps,
};
static int vi_common_early_init(void *handle)
return -EINVAL;
}
+ /* in the early init stage, the vBIOS code won't work yet */
+ if (adev->asic_funcs->detect_hw_virtualization)
+ amdgpu_asic_detect_hw_virtualization(adev);
+
if (amdgpu_smc_load_fw && smc_enabled)
adev->firmware.smu_load = true;