entry_obj->len = bb_size;
INIT_LIST_HEAD(&entry_obj->list);
- ret = i915_gem_object_get_pages(entry_obj->obj);
- if (ret)
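+ /* pin the pages and get the va of the shadow batch buffer */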
+ dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
goto put_obj;
-
- i915_gem_object_pin_pages(entry_obj->obj);
-
- /* get the va of the shadow batch buffer */
- dst = (void *)vmap_batch(entry_obj->obj, 0, bb_size);
- if (!dst) {
- gvt_err("failed to vmap shadow batch\n");
- ret = -ENOMEM;
- goto unpin_src;
}
ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
+ if (ret) {
+ gvt_err("failed to set shadow batch to CPU\n");
+ goto unmap_src;
+ }
entry_obj->va = dst;
/* copy batch buffer to shadow batch buffer*/
ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
- gma, gma + bb_size, dst);
+ gma, gma + bb_size,
+ dst);
if (ret) {
gvt_err("fail to copy guest ring buffer\n");
goto unmap_src;
}

list_add(&entry_obj->list, &workload->shadow_bb);
return 0;
unmap_src:
- vunmap(dst);
-unpin_src:
- i915_gem_object_unpin_pages(entry_obj->obj);
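+ /* unpin_map drops the page pin taken by pin_map above */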
+ i915_gem_object_unpin_map(entry_obj->obj);
put_obj:
i915_gem_object_put(entry_obj->obj);
free_entry:
list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
list) {
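+ /* entries already on shadow_bb still hold their pin_map pin */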
+ i915_gem_object_unpin_map(entry_obj->obj);
i915_gem_object_put(entry_obj->obj);
- kvfree(entry_obj->va);
list_del(&entry_obj->list);
kfree(entry_obj);
}
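
For reference, the i915_gem_object_pin_map()/i915_gem_object_unpin_map() pairing the patch switches to follows the shape below. This is a minimal sketch, not code from the patch: shadow_fill_example() and its parameters are illustrative.

	static int shadow_fill_example(struct drm_i915_gem_object *obj,
				       const void *src, size_t len)
	{
		void *va;

		/* pin the backing pages and get a CPU mapping in one call */
		va = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(va))
			return PTR_ERR(va);

		memcpy(va, src, len);

		/*
		 * drop the pin taken by pin_map(); the object may keep the
		 * mapping cached until its backing pages are released
		 */
		i915_gem_object_unpin_map(obj);
		return 0;
	}

Compared with the removed get_pages/pin_pages/vmap sequence, the error path collapses to a single unpin_map() call, which is what lets the patch drop the separate unpin_src label.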