From 68fa965dd923177eafad49b7a0045fc610917341 Mon Sep 17 00:00:00 2001
From: Mats Petersson
Date: Fri, 16 Nov 2012 18:36:49 +0000
Subject: [PATCH] xen/privcmd: Correctly return success from
 IOCTL_PRIVCMD_MMAPBATCH

This is a regression introduced by ceb90fa0 (xen/privcmd: add
PRIVCMD_MMAPBATCH_V2 ioctl). It broke xentrace, which uses
xc_map_foreign() instead of xc_map_foreign_bulk(). Most code-paths
prefer MMAPBATCH_V2, so the breakage was not very obvious.

The return value is set to -EINVAL early on, and if all goes well the
"set top bits of the MFNs" code never gets called, so the return value
is still -EINVAL when the function reaches the end. This causes the
caller to think the call went wrong (which it didn't!).

This also incorporates Andres' suggestion to move the ret = -EINVAL
into the error handling path, as that avoids similar errors in the
future.

Signed-off-by: Mats Petersson
Acked-by: Andres Lagar-Cavilla
Acked-by: David Vrabel
Signed-off-by: Konrad Rzeszutek Wilk
---
 drivers/xen/privcmd.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8adb9cc267f9..71f5c459b088 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -361,13 +361,13 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 	down_write(&mm->mmap_sem);
 
 	vma = find_vma(mm, m.addr);
-	ret = -EINVAL;
 	if (!vma ||
 	    vma->vm_ops != &privcmd_vm_ops ||
 	    (m.addr != vma->vm_start) ||
 	    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
 	    !privcmd_enforce_singleshot_mapping(vma)) {
 		up_write(&mm->mmap_sem);
+		ret = -EINVAL;
 		goto out;
 	}
 
@@ -383,12 +383,16 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 
 	up_write(&mm->mmap_sem);
 
-	if (state.global_error && (version == 1)) {
-		/* Write back errors in second pass. */
-		state.user_mfn = (xen_pfn_t *)m.arr;
-		state.err = err_array;
-		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-				     &pagelist, mmap_return_errors_v1, &state);
+	if (version == 1) {
+		if (state.global_error) {
+			/* Write back errors in second pass. */
+			state.user_mfn = (xen_pfn_t *)m.arr;
+			state.err = err_array;
+			ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+					     &pagelist, mmap_return_errors_v1,
+					     &state);
+		} else
+			ret = 0;
 	} else if (version == 2) {
 		ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
 		if (ret)
-- 
2.20.1
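
As an aside (not part of the patch itself): the fix follows a common
error-handling pattern, namely setting the error code on the path that
detects the failure rather than pre-loading ret with -EINVAL and relying
on the success path to overwrite it. Below is a minimal sketch of that
pattern in plain C; validate_request() and do_mmap_batch() are
hypothetical stand-ins, not functions from drivers/xen/privcmd.c.

	/*
	 * Minimal sketch of the pattern the patch adopts: ret starts as
	 * success and only an error path assigns an error code.
	 */
	#include <errno.h>
	#include <stdbool.h>

	static bool validate_request(unsigned long addr, unsigned long nr_pages)
	{
		/* Stand-in for the vma/range checks done in the real ioctl. */
		return addr != 0 && nr_pages != 0;
	}

	static long do_mmap_batch(unsigned long addr, unsigned long nr_pages)
	{
		long ret = 0;			/* success unless an error path runs */

		if (!validate_request(addr, nr_pages)) {
			ret = -EINVAL;		/* error set where it is detected */
			goto out;
		}

		/* ... mapping work would go here; failures set ret explicitly ... */
	out:
		return ret;
	}

	int main(void)
	{
		return do_mmap_batch(0x1000, 1) == 0 ? 0 : 1;
	}

The point mirrors the patch: because success is the default and only
the failing branch assigns -EINVAL, the V1 "no errors to write back"
case can no longer fall through and report failure by accident.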