ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
first_mfn,
- vgpu_aperture_sz(vgpu)
- >> PAGE_SHIFT, map,
- GVT_MAP_APERTURE);
+ vgpu_aperture_sz(vgpu) >>
+ PAGE_SHIFT, map);
if (ret)
return ret;
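
For the arithmetic in the call above: right-shifting the aperture size by PAGE_SHIFT converts a byte count into a page count for the nr argument. A minimal sketch, assuming 4 KiB pages (PAGE_SHIFT == 12, as on x86) and a purely illustrative 128 MiB aperture:

#include <stdio.h>

#define PAGE_SHIFT 12 /* 4 KiB pages, as on x86 */

int main(void)
{
	unsigned long aperture_sz = 128UL << 20; /* illustrative 128 MiB aperture */

	/* same byte-to-page conversion as vgpu_aperture_sz(vgpu) >> PAGE_SHIFT */
	printf("%lu pages\n", aperture_sz >> PAGE_SHIFT); /* prints "32768 pages" */
	return 0;
}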
unsigned long len);
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
- unsigned long mfn, unsigned int nr, bool map,
- int type);
+ unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
};
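
With the type parameter gone, a backend implementing this hook only sees a GFN/MFN range and a map/unmap flag. The following is a user-space model of that dispatch, not the kernel's code: struct mpt_stub and stub_map_gfn_to_mfn are hypothetical names, and a real backend would issue hypercalls rather than print.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the intel_gvt_mpt ops table shown above. */
struct mpt_stub {
	int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
			      unsigned long mfn, unsigned int nr, bool map);
};

/* A real backend would issue a hypercall here; this stub only logs. */
static int stub_map_gfn_to_mfn(unsigned long handle, unsigned long gfn,
			       unsigned long mfn, unsigned int nr, bool map)
{
	printf("handle %lu: %s gfn %#lx -> mfn %#lx (%u pages)\n",
	       handle, map ? "map" : "unmap", gfn, mfn, nr);
	return 0;
}

static const struct mpt_stub mpt = {
	.map_gfn_to_mfn = stub_map_gfn_to_mfn,
};

int main(void)
{
	/* one call per direction; the bool is all that distinguishes them */
	mpt.map_gfn_to_mfn(1, 0x1000, 0x8000, 2, true);
	mpt.map_gfn_to_mfn(1, 0x1000, 0x8000, 2, false);
	return 0;
}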
return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
}
-enum {
- GVT_MAP_APERTURE = 0,
- GVT_MAP_OPREGION,
-};
-
/**
* intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
 * @vgpu: a vGPU
 * @gfn: guest PFN
* @mfn: host PFN
* @nr: amount of PFNs
* @map: map or unmap
- * @type: map type
*
* Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long mfn, unsigned int nr,
- bool map, int type)
+ bool map)
{
return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
- map, type);
+ map);
}
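
Callers are expected to pair each map with a symmetric unmap through this one wrapper, with only the bool changing. A hedged caller-side sketch: map_gfn_range and all of its values are illustrative stand-ins, keeping only the zero-on-success, negative-errno convention documented above.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for intel_gvt_hypervisor_map_gfn_to_mfn();
 * it follows the same zero-on-success / negative-errno convention.
 */
static int map_gfn_range(unsigned long gfn, unsigned long mfn,
			 unsigned int nr, bool map)
{
	printf("%s %u page(s) at gfn %#lx -> mfn %#lx\n",
	       map ? "mapping" : "unmapping", nr, gfn, mfn);
	return 0;
}

int main(void)
{
	int ret;

	ret = map_gfn_range(0x100, 0x8100, 4, true);
	if (ret)
		return 1; /* nothing to unwind yet */

	/* ... guest uses the mapping ... */

	ret = map_gfn_range(0x100, 0x8100, 4, false); /* symmetric unmap */
	return ret ? 1 : 0;
}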
/**
}
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
vgpu_opregion(vgpu)->gfn[i],
- mfn, 1, map, GVT_MAP_OPREGION);
+ mfn, 1, map);
if (ret) {
gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
return ret;
*/
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{
- int i;
-
gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
if (!vgpu_opregion(vgpu)->va)
return;
- if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
- vunmap(vgpu_opregion(vgpu)->va);
- for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
- if (vgpu_opregion(vgpu)->pages[i]) {
- put_page(vgpu_opregion(vgpu)->pages[i]);
- vgpu_opregion(vgpu)->pages[i] = NULL;
- }
- }
- } else {
+ if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
map_vgpu_opregion(vgpu, false);
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
INTEL_GVT_OPREGION_PORDER);
- }
- vgpu_opregion(vgpu)->va = NULL;
+ vgpu_opregion(vgpu)->va = NULL;
+ }
}
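
Note the shape of the teardown above: an early return when va is NULL, and va cleared once the pages are released, so a repeated clean call degrades to a no-op. A small model of that guard/free/clear pattern, with illustrative names and plain malloc/free standing in for the kernel page allocator:

#include <stdio.h>
#include <stdlib.h>

struct opregion_stub {
	void *va; /* NULL when not allocated */
};

static void clean_stub(struct opregion_stub *o)
{
	if (!o->va)
		return; /* already cleaned: safe to call again */

	free(o->va);
	o->va = NULL; /* mark as cleaned so a second call is a no-op */
}

int main(void)
{
	struct opregion_stub o = { .va = malloc(32) }; /* sketch: NULL check omitted */

	clean_stub(&o); /* frees and clears */
	clean_stub(&o); /* harmless no-op */
	puts("done");
	return 0;
}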
/**
ret = map_vgpu_opregion(vgpu, true);
if (ret)
return ret;
- } else {
- gvt_dbg_core("emulate opregion from userspace\n");
-
- /*
- * If opregion pages are not allocated from host kenrel,
- * most of the params are meaningless
- */
- ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
- 0, /* not used */
- 0, /* not used */
- 2, /* not used */
- 1,
- GVT_MAP_OPREGION);
- if (ret)
- return ret;
}
+
return 0;
}
if (ret)
goto out_detach_hypervisor_vgpu;
- if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
- ret = intel_vgpu_init_opregion(vgpu, 0);
- if (ret)
- goto out_clean_gtt;
- }
-
ret = intel_vgpu_init_display(vgpu);
if (ret)
- goto out_clean_opregion;
+ goto out_clean_gtt;
ret = intel_vgpu_init_execlist(vgpu);
if (ret)
intel_vgpu_clean_execlist(vgpu);
out_clean_display:
intel_vgpu_clean_display(vgpu);
-out_clean_opregion:
- intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
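
Dropping the label leaves the standard kernel unwind ladder one rung shorter: each init step that succeeds adds a goto target, and a later failure jumps to the label that undoes exactly the steps completed so far. A compact, self-contained model of the idiom; init_a, init_b and clean_a are hypothetical:

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return -1; /* simulate failure */ }
static void clean_a(void) { puts("clean a"); }

static int create(void)
{
	int ret;

	ret = init_a();
	if (ret)
		return ret; /* nothing succeeded yet, nothing to undo */

	ret = init_b();
	if (ret)
		goto out_clean_a; /* unwind only what already succeeded */

	return 0;

out_clean_a:
	clean_a();
	return ret;
}

int main(void)
{
	return create() ? 1 : 0;
}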