drm/i915/gvt: use kmap instead of kmap_atomic around guest memory access
author     Xiaoguang Chen <xiaoguang.chen@intel.com>
           Thu, 3 Nov 2016 10:38:30 +0000 (18:38 +0800)
committer  Zhenyu Wang <zhenyuw@linux.intel.com>
           Thu, 10 Nov 2016 07:42:39 +0000 (15:42 +0800)
kmap_atomic() does not allow sleeping until the page is unmapped,
but reading and writing guest memory through the hypervisor may
sleep. Use kmap() instead so the mapping stays valid across the
sleeping calls.
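
For reference, a minimal sketch (not part of this patch) of the
pattern the change applies. copy_guest_page() is a hypothetical
helper; intel_gvt_hypervisor_read_gpa(), GTT_PAGE_SIZE and
struct intel_vgpu are taken from the GVT-g code touched below:

    #include <linux/highmem.h>      /* kmap()/kunmap() */

    /*
     * Hypothetical helper: copy one guest page into a shadow page.
     * kmap_atomic() disables preemption, so nothing between the map
     * and the unmap may sleep; intel_gvt_hypervisor_read_gpa() can
     * sleep while resolving the guest physical address, hence kmap().
     */
    static void copy_guest_page(struct intel_vgpu *vgpu,
                                unsigned long context_gpa,
                                struct page *page)
    {
            void *dst = kmap(page);         /* mapping survives sleeps */

            intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
                            GTT_PAGE_SIZE);
            kunmap(page);                   /* unmap by page, not by address */
    }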

Signed-off-by: Bing Niu <bing.niu@intel.com>
Signed-off-by: Xiaoguang Chen <xiaoguang.chen@intel.com>
Signed-off-by: Jike Song <jike.song@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 843a5de4300d16653f6b0606fc981f8fa37ee04c..7d87c43661c58627012bbc6d870b70b9a9854678 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -89,15 +89,15 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
                }
 
                page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
-               dst = kmap_atomic(page);
+               dst = kmap(page);
                intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
                                GTT_PAGE_SIZE);
-               kunmap_atomic(dst);
+               kunmap(page);
                i++;
        }
 
        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-       shadow_ring_context = kmap_atomic(page);
+       shadow_ring_context = kmap(page);
 
 #define COPY_REG(name) \
        intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
@@ -123,7 +123,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
-       kunmap_atomic(shadow_ring_context);
+       kunmap(page);
        return 0;
 }
 
@@ -318,10 +318,10 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
                }
 
                page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
-               src = kmap_atomic(page);
+               src = kmap(page);
                intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
                                GTT_PAGE_SIZE);
-               kunmap_atomic(src);
+               kunmap(page);
                i++;
        }
 
@@ -329,7 +329,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
                RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
 
        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-       shadow_ring_context = kmap_atomic(page);
+       shadow_ring_context = kmap(page);
 
 #define COPY_REG(name) \
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
@@ -347,7 +347,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
-       kunmap_atomic(shadow_ring_context);
+       kunmap(page);
 }
 
 static void complete_current_workload(struct intel_gvt *gvt, int ring_id)