xen/privcmd: Relax access control in privcmd_ioctl_mmap
drivers/xen/privcmd.c
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif

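/*
 * IOCTL_PRIVCMD_HYPERCALL: copy a hypercall descriptor in from
 * userspace and forward it, with its five arguments uninterpreted,
 * to the hypervisor via privcmd_call().
 */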
static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);

	return ret;
}

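/*
 * Free every page gathered onto @pages and reinitialise the list head
 * so the list can be reused.
 */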
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}

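/*
 * gather_array() never splits an element across a page boundary: a new
 * page is started once fewer than @size bytes remain in the current
 * one.  For example, with 4K pages and the 24-byte privcmd_mmap_entry,
 * each page carries 170 entries and the final 16 bytes go unused;
 * traverse_pages() below relies on the same layout when it walks the
 * list back.
 */
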
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

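/*
 * State threaded through traverse_pages() by privcmd_ioctl_mmap():
 * the next expected virtual address, the VMA being populated and the
 * foreign domain whose frames are mapped.
 */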
struct mmap_mfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

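/*
 * Map a single privcmd_mmap_entry chunk.  Chunks must be presented in
 * ascending, contiguous VA order; st->va tracks where the next chunk
 * has to begin.
 */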
static int mmap_mfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_mfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va + (msg->npages << PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_mfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

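/*
 * IOCTL_PRIVCMD_MMAP: map an array of (va, mfn, npages) ranges from
 * foreign domain mmapcmd.dom into the calling process.  The target VMA
 * must start exactly at the first entry's va, and each VMA can only
 * ever be used for one mapping operation.
 */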
static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_mfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) ||
		    !privcmd_enforce_singleshot_mapping(vma))
			goto out_up;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_mfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *	0 for no errors
	 *	1 if at least one error has happened (and no
	 *	  -ENOENT errors have happened)
	 *	-ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	/* An array for individual errors */
	int *err;

	/* User-space mfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_mfn;
};

/* auto translated dom0 note: if domU being created is PV, then mfn is
 * mfn(addr on bus).  If it's auto xlated, then mfn is pfn (input to HAP).
 */
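/*
 * Note that mmap_batch_fn() always returns 0, even when an individual
 * frame fails to map: the per-frame status is recorded in st->err and
 * summarised in st->global_error, which guarantees traverse_pages()
 * visits every frame exactly once.
 */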
static int mmap_batch_fn(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page *cur_page = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_page = pages[st->index++];

	ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
					 st->vma->vm_page_prot, st->domain,
					 &cur_page);

	/* Store error code for second pass. */
	*(st->err++) = ret;

	/* And see if it affects the global_error. */
	if (ret < 0) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += PAGE_SIZE;

	return 0;
}

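/*
 * Second pass for V1 callers: fold each saved error code back into the
 * caller's mfn array.  A paged-out frame is flagged with
 * PRIVCMD_MMAPBATCH_PAGED_ERROR (top bit of the mfn), any other
 * failure with PRIVCMD_MMAPBATCH_MFN_ERROR (top nibble), which is why
 * V1 cannot report errors faithfully for frame numbers wide enough to
 * use those bits themselves.
 */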
static int mmap_return_errors_v1(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;
	int err = *(st->err++);

	/*
	 * V1 encodes the error codes in the top nibble of the 32-bit
	 * mfn (with its known limitations vis-a-vis 64 bit callers).
	 */
	*mfnp |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
	return __put_user(*mfnp, st->user_mfn++);
}

/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = alloc_xenballooned_pages(numpgs, pages, 0);
	if (rc != 0) {
		pr_warn("%s: could not alloc %d pfns, rc:%d\n", __func__,
			numpgs, rc);
		kfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED);
	vma->vm_private_data = pages;

	return 0;
}


static struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
{
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	int *err_array = NULL;
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	nr_pages = m.num;
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL);
	if (err_array == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	ret = -EINVAL;
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops ||
	    (m.addr != vma->vm_start) ||
	    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
	    !privcmd_enforce_singleshot_mapping(vma)) {
		up_write(&mm->mmap_sem);
		goto out;
	}
	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		ret = alloc_empty_pages(vma, m.num);
		if (ret < 0) {
			up_write(&mm->mmap_sem);
			goto out;
		}
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.index = 0;
	state.global_error = 0;
	state.err = err_array;

	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
			      &pagelist, mmap_batch_fn, &state));

	up_write(&mm->mmap_sem);

	if (version == 1) {
		if (state.global_error) {
			/* Write back errors in second pass. */
			state.user_mfn = (xen_pfn_t *)m.arr;
			state.err = err_array;
			ret = traverse_pages(m.num, sizeof(xen_pfn_t),
					     &pagelist, mmap_return_errors_v1,
					     &state);
		} else
			ret = 0;	/* don't leak the stale -EINVAL above */
	} else if (version == 2) {
		ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
		if (ret)
			ret = -EFAULT;
	}

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	kfree(err_array);
	free_page_list(&pagelist);

	return ret;
}
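
/*
 * Illustrative sketch only (not part of this driver, and compiled out):
 * roughly how a userspace toolstack might drive
 * IOCTL_PRIVCMD_MMAPBATCH_V2.  The device node name follows the
 * miscdevice registered below; a 4096-byte page size is assumed and
 * error handling is elided.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/privcmd.h>

static int map_foreign_frames(domid_t dom, const xen_pfn_t *gmfns,
			      int *err, unsigned int num)
{
	struct privcmd_mmapbatch_v2 m;
	int fd = open("/dev/xen/privcmd", O_RDWR | O_CLOEXEC);
	void *addr;

	if (fd < 0)
		return -1;
	addr = mmap(NULL, (size_t)num * 4096, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		return -1;

	m.num  = num;
	m.dom  = dom;
	m.addr = (unsigned long)addr;
	m.arr  = gmfns;	/* frames to map */
	m.err  = err;	/* per-frame status, written back by the kernel */
	return ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m);
}
#endif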

static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(udata, 2);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

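/*
 * VMA close hook: on auto-translated guests, unmap the foreign frames
 * and hand the ballooned pages backing the mapping back to Xen.
 */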
static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	/* The original condition had a misplaced parenthesis, putting the
	 * !numpgs and !pages tests inside the xen_feature() argument. */
	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	xen_unmap_domain_mfn_range(vma, numpgs, pages);
	free_xenballooned_pages(numpgs, pages);
	kfree(pages);
}

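/*
 * Mappings must be fully populated through the ioctls above before
 * they are touched, so any fault on a privcmd VMA indicates a caller
 * bug and is answered with SIGBUS.
 */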
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

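/*
 * Atomically claim a VMA for a single mapping operation: the first
 * caller swaps vm_private_data from NULL to PRIV_VMA_LOCKED and wins;
 * any repeat attempt on the same VMA fails.
 */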
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
	return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED);
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		printk(KERN_ERR "Could not register Xen privcmd device\n");
		return err;
	}
	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);