xen: add pages parameter to xen_remap_domain_mfn_range
author     Ian Campbell <ian.campbell@citrix.com>
           Wed, 17 Oct 2012 20:37:49 +0000 (13:37 -0700)
committer  Ian Campbell <ian.campbell@citrix.com>
           Thu, 29 Nov 2012 12:57:36 +0000 (12:57 +0000)

Also introduce xen_unmap_domain_mfn_range. These are the parts of
Mukesh's "xen/pvh: Implement MMU changes for PVH" which are also
needed as a baseline for ARM privcmd support.

The original patch was:

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

This derivative is also:

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
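
A minimal sketch of the caller-side view of the widened interface (the wrapper
and its name below are illustrative only, not part of this patch): classic PV
callers keep passing NULL for the new pages argument, as the privcmd hunks
below do, while auto-translated guests (PVH, ARM) are expected to hand in an
array of pages backing the foreign mapping.

    /* Illustrative wrapper, not part of this patch. */
    #include <linux/mm.h>
    #include <xen/features.h>
    #include <xen/xen-ops.h>

    static int example_remap_foreign(struct vm_area_struct *vma,
                                     unsigned long addr, unsigned long mfn,
                                     int nr, unsigned domid,
                                     struct page **pages)
    {
            /* Classic PV guests have no backing struct pages to track. */
            if (!xen_feature(XENFEAT_auto_translated_physmap))
                    pages = NULL;

            return xen_remap_domain_mfn_range(vma, addr, mfn, nr,
                                              vma->vm_page_prot, domid,
                                              pages);
    }
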
arch/x86/xen/mmu.c
drivers/xen/privcmd.c
include/xen/xen-ops.h

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 6226c99729b963594a2e133290cbf8f3c6e681b8..0f6386a5b43745c52811a8690a0171b234530e1c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2479,7 +2479,9 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
                               unsigned long mfn, int nr,
-                              pgprot_t prot, unsigned domid)
+                              pgprot_t prot, unsigned domid,
+                              struct page **pages)
+
 {
        struct remap_data rmd;
        struct mmu_update mmu_update[REMAP_BATCH_SIZE];
@@ -2523,3 +2525,14 @@ out:
        return err;
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+
+/* Returns: 0 success */
+int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+                              int numpgs, struct page **pages)
+{
+       if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
+               return 0;
+
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8adb9cc267f96e201ade041441ef8a317ce336d8..b612267a8cb6dfd8209774e68548c4895d04a3b1 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -178,7 +178,7 @@ static int mmap_mfn_range(void *data, void *state)
                                        msg->va & PAGE_MASK,
                                        msg->mfn, msg->npages,
                                        vma->vm_page_prot,
-                                       st->domain);
+                                       st->domain, NULL);
        if (rc < 0)
                return rc;
 
@@ -267,7 +267,8 @@ static int mmap_batch_fn(void *data, void *state)
        int ret;
 
        ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
-                                        st->vma->vm_page_prot, st->domain);
+                                        st->vma->vm_page_prot, st->domain,
+                                        NULL);
 
        /* Store error code for second pass. */
        *(st->err++) = ret;
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 6a198e46ab6e1c50cdff7fe76d58b3a4afb43d43..990b43e441e6371f231da27ffb139145ee5f84b6 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -27,6 +27,9 @@ struct vm_area_struct;
 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
                               unsigned long mfn, int nr,
-                              pgprot_t prot, unsigned domid);
+                              pgprot_t prot, unsigned domid,
+                              struct page **pages);
+int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+                              int numpgs, struct page **pages);
 
 #endif /* INCLUDE_XEN_OPS_H */
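
The new unmap declaration completes the pair: with this patch it is a no-op on
classic PV x86 and still returns -EINVAL for auto-translated guests, so a
caller's teardown path should treat a non-zero return as "not implemented yet"
rather than as a hard error. The sketch below is illustrative only and its
names are not part of this patch.

    /* Illustrative teardown path, not part of this patch. */
    #include <linux/mm.h>
    #include <linux/printk.h>
    #include <xen/xen-ops.h>

    static void example_unmap_foreign(struct vm_area_struct *vma,
                                      int numpgs, struct page **pages)
    {
            int rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);

            /*
             * 0: nothing to tear down (classic PV) or success.
             * -EINVAL: unmapping for auto-translated guests is not
             * implemented yet on this architecture.
             */
            if (rc)
                    pr_warn("xen: unmap of foreign frames failed: %d\n", rc);
    }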