/******************************************************************************
 * gntdev.c
 *
 * Device for accessing (in user-space) pages that have been granted by other
 * domains.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/balloon.h>
#include <xen/gntdev.h>
#include <xen/events.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
	      "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");

static int limit = 1024*1024;
module_param(limit, int, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
		"one mapping request");

static atomic_t pages_mapped = ATOMIC_INIT(0);

static int use_ptemod;
#define populate_freeable_maps use_ptemod
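
/* use_ptemod is set at init time: on PV domains the granted frames are
 * mapped by rewriting the user PTEs (see find_grant_ptes() below), while
 * on auto-translated (HVM) domains the pages are inserted directly with
 * vm_insert_page().  Freeable-map tracking is only needed in the PV case,
 * hence the alias above. */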

struct gntdev_priv {
	/* maps with visible offsets in the file descriptor */
	struct list_head maps;
	/* maps that are not visible; will be freed on munmap.
	 * Only populated if populate_freeable_maps == 1 */
	struct list_head freeable_maps;
	/* lock protects maps and freeable_maps */
	spinlock_t lock;
	struct mm_struct *mm;
	struct mmu_notifier mn;
};

struct unmap_notify {
	int flags;
	/* Address relative to the start of the grant_map */
	int addr;
	int event;
};

struct grant_map {
	struct list_head next;
	struct vm_area_struct *vma;
	int index;
	int count;
	int flags;
	atomic_t users;
	struct unmap_notify notify;
	struct ioctl_gntdev_grant_ref *grants;
	struct gnttab_map_grant_ref   *map_ops;
	struct gnttab_unmap_grant_ref *unmap_ops;
	struct gnttab_map_grant_ref   *kmap_ops;
	struct page **pages;
};

static int unmap_grant_pages(struct grant_map *map, int offset, int pages);

/* ------------------------------------------------------------------ */
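
/* Overview (a summary, not authoritative documentation): a grant_map is
 * created by IOCTL_GNTDEV_MAP_GRANT_REF, inserted into priv->maps at a
 * free file offset, actually mapped when user-space mmap()s that offset,
 * and torn down via unmap_grant_pages()/gntdev_put_map() on munmap, the
 * unmap ioctl, or final release of the file. */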

static void gntdev_print_maps(struct gntdev_priv *priv,
			      char *text, int text_index)
{
#ifdef DEBUG
	struct grant_map *map;

	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
	list_for_each_entry(map, &priv->maps, next)
		pr_debug("  index %2d, count %2d %s\n",
			 map->index, map->count,
			 map->index == text_index && text ? text : "");
#endif
}

static void gntdev_free_map(struct grant_map *map)
{
	if (map == NULL)
		return;

	if (map->pages)
		free_xenballooned_pages(map->count, map->pages);
	kfree(map->pages);
	kfree(map->grants);
	kfree(map->map_ops);
	kfree(map->unmap_ops);
	kfree(map->kmap_ops);
	kfree(map);
}

static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
{
	struct grant_map *add;
	int i;

	add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
	if (NULL == add)
		return NULL;

	add->grants    = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
	add->map_ops   = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
	add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
	add->kmap_ops  = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
	add->pages     = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
	if (NULL == add->grants    ||
	    NULL == add->map_ops   ||
	    NULL == add->unmap_ops ||
	    NULL == add->kmap_ops  ||
	    NULL == add->pages)
		goto err;

	if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */))
		goto err;

	for (i = 0; i < count; i++) {
		add->map_ops[i].handle = -1;
		add->unmap_ops[i].handle = -1;
		add->kmap_ops[i].handle = -1;
	}

	add->index = 0;
	add->count = count;
	atomic_set(&add->users, 1);

	return add;

err:
	gntdev_free_map(add);
	return NULL;
}

static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
{
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (add->index + add->count < map->index) {
			list_add_tail(&add->next, &map->next);
			goto done;
		}
		add->index = map->index + map->count;
	}
	list_add_tail(&add->next, &priv->maps);

done:
	gntdev_print_maps(priv, "[new]", add->index);
}

static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
					       int index, int count)
{
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (map->index != index)
			continue;
		if (count && map->count != count)
			continue;
		return map;
	}
	return NULL;
}
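
/* Drop one reference; on the last put, send/clear any requested unmap
 * notification, unlink from freeable_maps if needed, and free the map. */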

static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
{
	if (!map)
		return;

	if (!atomic_dec_and_test(&map->users))
		return;

	atomic_sub(map->count, &pages_mapped);

	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
		notify_remote_via_evtchn(map->notify.event);
		evtchn_put(map->notify.event);
	}

	if (populate_freeable_maps && priv) {
		spin_lock(&priv->lock);
		list_del(&map->next);
		spin_unlock(&priv->lock);
	}

	if (map->pages && !use_ptemod)
		unmap_grant_pages(map, 0, map->count);
	gntdev_free_map(map);
}

/* ------------------------------------------------------------------ */
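
/* PV path: called via apply_to_page_range() for every user PTE of the
 * VMA.  GNTMAP_contains_pte points the grant-map operation at the machine
 * address of the PTE itself, so Xen writes the mapping directly into the
 * page table. */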

static int find_grant_ptes(pte_t *pte, pgtable_t token,
			   unsigned long addr, void *data)
{
	struct grant_map *map = data;
	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
	u64 pte_maddr;

	BUG_ON(pgnr >= map->count);
	pte_maddr = arbitrary_virt_to_machine(pte).maddr;

	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
			  map->grants[pgnr].ref,
			  map->grants[pgnr].domid);
	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
			    -1 /* handle */);
	return 0;
}
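
/* Build the batch of grant-map operations for the whole range and submit
 * it with gnttab_map_refs(): without use_ptemod the granted frames are
 * mapped at the pages' kernel addresses; with use_ptemod the user PTEs
 * were already prepared by find_grant_ptes() and only the kernel-side
 * kmap_ops are set up here. */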

static int map_grant_pages(struct grant_map *map)
{
	int i, err = 0;

	if (!use_ptemod) {
		/* Note: it could already be mapped */
		if (map->map_ops[0].handle != -1)
			return 0;
		for (i = 0; i < map->count; i++) {
			unsigned long addr = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
					  map->grants[i].ref,
					  map->grants[i].domid);
			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
					    map->flags, -1 /* handle */);
		}
	} else {
		/*
		 * Setup the map_ops corresponding to the pte entries pointing
		 * to the kernel linear addresses of the struct pages.
		 * These ptes are completely different from the user ptes dealt
		 * with by find_grant_ptes.
		 */
		for (i = 0; i < map->count; i++) {
			unsigned int level;
			unsigned long address = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			pte_t *ptep;
			u64 pte_maddr = 0;

			BUG_ON(PageHighMem(map->pages[i]));

			ptep = lookup_address(address, &level);
			pte_maddr = arbitrary_virt_to_machine(ptep).maddr;
			gnttab_set_map_op(&map->kmap_ops[i], pte_maddr,
					  map->flags |
					  GNTMAP_host_map |
					  GNTMAP_contains_pte,
					  map->grants[i].ref,
					  map->grants[i].domid);
		}
	}

	pr_debug("map %d+%d\n", map->index, map->count);
	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
			      map->pages, map->count);
	if (err)
		return err;

	for (i = 0; i < map->count; i++) {
		if (map->map_ops[i].status)
			err = -EINVAL;
		else {
			BUG_ON(map->map_ops[i].handle == -1);
			map->unmap_ops[i].handle = map->map_ops[i].handle;
			pr_debug("map handle=%d\n", map->map_ops[i].handle);
		}
	}
	return err;
}
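
/* If user-space asked for UNMAP_NOTIFY_CLEAR_BYTE, zero the notification
 * byte before the pages go away so the other end can observe the unmap. */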

static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
	int i, err = 0;

	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		int pgno = (map->notify.addr >> PAGE_SHIFT);
		if (pgno >= offset && pgno < offset + pages) {
			/* No need for kmap, pages are in lowmem */
			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		}
	}

	err = gnttab_unmap_refs(map->unmap_ops + offset,
			use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
			pages);
	if (err)
		return err;

	for (i = 0; i < pages; i++) {
		if (map->unmap_ops[offset+i].status)
			err = -EINVAL;
		pr_debug("unmap handle=%d st=%d\n",
			 map->unmap_ops[offset+i].handle,
			 map->unmap_ops[offset+i].status);
		map->unmap_ops[offset+i].handle = -1;
	}
	return err;
}

static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
	int range, err = 0;

	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);

	/* It is possible the requested range will have a "hole" where we
	 * already unmapped some of the grants. Only unmap valid ranges.
	 */
	while (pages && !err) {
		while (pages && map->unmap_ops[offset].handle == -1) {
			offset++;
			pages--;
		}
		range = 0;
		while (range < pages) {
			if (map->unmap_ops[offset+range].handle == -1)
				break;
			range++;
		}
		err = __unmap_grant_pages(map, offset, range);
		offset += range;
		pages -= range;
	}

	return err;
}

/* ------------------------------------------------------------------ */

static void gntdev_vma_open(struct vm_area_struct *vma)
{
	struct grant_map *map = vma->vm_private_data;

	pr_debug("gntdev_vma_open %p\n", vma);
	atomic_inc(&map->users);
}

static void gntdev_vma_close(struct vm_area_struct *vma)
{
	struct grant_map *map = vma->vm_private_data;
	struct file *file = vma->vm_file;
	struct gntdev_priv *priv = file->private_data;

	pr_debug("gntdev_vma_close %p\n", vma);
	if (use_ptemod) {
		/* It is possible that an mmu notifier could be running
		 * concurrently, so take priv->lock to ensure that the vma
		 * won't vanish during the unmap_grant_pages call, since we
		 * will spin here until that completes. Such a concurrent
		 * call will not do any unmapping, since that has been done
		 * prior to closing the vma, but it may still iterate the
		 * unmap_ops list.
		 */
		spin_lock(&priv->lock);
		map->vma = NULL;
		spin_unlock(&priv->lock);
	}
	vma->vm_private_data = NULL;
	gntdev_put_map(priv, map);
}

static struct vm_operations_struct gntdev_vmops = {
	.open = gntdev_vma_open,
	.close = gntdev_vma_close,
};

/* ------------------------------------------------------------------ */
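
/* MMU notifier callbacks (PV only): when part of the address space is
 * invalidated or the process exits, the grant mappings must be torn down
 * through Xen as well, not just dropped from the page tables. */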

static void unmap_if_in_range(struct grant_map *map,
			      unsigned long start, unsigned long end)
{
	unsigned long mstart, mend;
	int err;

	if (!map->vma)
		return;
	if (map->vma->vm_start >= end)
		return;
	if (map->vma->vm_end <= start)
		return;
	mstart = max(start, map->vma->vm_start);
	mend   = min(end,   map->vma->vm_end);
	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
			map->index, map->count,
			map->vma->vm_start, map->vma->vm_end,
			start, end, mstart, mend);
	err = unmap_grant_pages(map,
				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
				(mend - mstart) >> PAGE_SHIFT);
	WARN_ON(err);
}

static void mn_invl_range_start(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		unmap_if_in_range(map, start, end);
	}
	list_for_each_entry(map, &priv->freeable_maps, next) {
		unmap_if_in_range(map, start, end);
	}
	spin_unlock(&priv->lock);
}

static void mn_invl_page(struct mmu_notifier *mn,
			 struct mm_struct *mm,
			 unsigned long address)
{
	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
}

static void mn_release(struct mmu_notifier *mn,
		       struct mm_struct *mm)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	int err;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	list_for_each_entry(map, &priv->freeable_maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	spin_unlock(&priv->lock);
}

static struct mmu_notifier_ops gntdev_mmu_ops = {
	.release                = mn_release,
	.invalidate_page        = mn_invl_page,
	.invalidate_range_start = mn_invl_range_start,
};

/* ------------------------------------------------------------------ */

static int gntdev_open(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv;
	int ret = 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->maps);
	INIT_LIST_HEAD(&priv->freeable_maps);
	spin_lock_init(&priv->lock);

	if (use_ptemod) {
		priv->mm = get_task_mm(current);
		if (!priv->mm) {
			kfree(priv);
			return -ENOMEM;
		}
		priv->mn.ops = &gntdev_mmu_ops;
		ret = mmu_notifier_register(&priv->mn, priv->mm);
		mmput(priv->mm);
	}

	if (ret) {
		kfree(priv);
		return ret;
	}

	flip->private_data = priv;
	pr_debug("priv %p\n", priv);

	return 0;
}

static int gntdev_release(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv = flip->private_data;
	struct grant_map *map;

	pr_debug("priv %p\n", priv);

	while (!list_empty(&priv->maps)) {
		map = list_entry(priv->maps.next, struct grant_map, next);
		list_del(&map->next);
		gntdev_put_map(NULL /* already removed */, map);
	}
	WARN_ON(!list_empty(&priv->freeable_maps));

	if (use_ptemod)
		mmu_notifier_unregister(&priv->mn, priv->mm);
	kfree(priv);
	return 0;
}
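
/* For reference, a minimal user-space sketch of the ioctl/mmap protocol
 * implemented below (illustrative only; error handling omitted, and the
 * remote_domid/remote_ref values are placeholders the caller must obtain
 * from the granting domain):
 *
 *	int fd = open("/dev/xen/gntdev", O_RDWR);
 *	struct ioctl_gntdev_map_grant_ref op = {
 *		.count = 1,
 *		.refs[0] = { .domid = remote_domid, .ref = remote_ref },
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
 *	void *addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, op.index);
 *
 * op.index is the file offset assigned by gntdev_ioctl_map_grant_ref();
 * the grant is released again via IOCTL_GNTDEV_UNMAP_GRANT_REF after
 * munmap(). */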

static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
				       struct ioctl_gntdev_map_grant_ref __user *u)
{
	struct ioctl_gntdev_map_grant_ref op;
	struct grant_map *map;
	int err;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, add %d\n", priv, op.count);
	if (unlikely(op.count <= 0))
		return -EINVAL;

	err = -ENOMEM;
	map = gntdev_alloc_map(priv, op.count);
	if (!map)
		return err;

	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
		pr_debug("can't map: over limit\n");
		gntdev_put_map(NULL, map);
		return err;
	}

	if (copy_from_user(map->grants, &u->refs,
			   sizeof(map->grants[0]) * op.count) != 0) {
		gntdev_put_map(NULL, map);
		return -EFAULT;
	}

	spin_lock(&priv->lock);
	gntdev_add_map(priv, map);
	op.index = map->index << PAGE_SHIFT;
	spin_unlock(&priv->lock);

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;

	return 0;
}

static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
					 struct ioctl_gntdev_unmap_grant_ref __user *u)
{
	struct ioctl_gntdev_unmap_grant_ref op;
	struct grant_map *map;
	int err = -ENOENT;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
	if (map) {
		list_del(&map->next);
		if (populate_freeable_maps)
			list_add_tail(&map->next, &priv->freeable_maps);
		err = 0;
	}
	spin_unlock(&priv->lock);
	if (map)
		gntdev_put_map(priv, map);
	return err;
}

static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
	struct ioctl_gntdev_get_offset_for_vaddr op;
	struct vm_area_struct *vma;
	struct grant_map *map;
	int rv = -EINVAL;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, op.vaddr);
	if (!vma || vma->vm_ops != &gntdev_vmops)
		goto out_unlock;

	map = vma->vm_private_data;
	if (!map)
		goto out_unlock;

	op.offset = map->index << PAGE_SHIFT;
	op.count = map->count;
	rv = 0;

 out_unlock:
	up_read(&current->mm->mmap_sem);

	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;
	return rv;
}
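
/* Attach or update an unmap notification on an existing mapping: clear a
 * byte in the mapped area, send an event channel notification, or both,
 * when the mapping is finally torn down. */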

static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_unmap_notify op;
	struct grant_map *map;
	int rc;
	int out_flags;
	unsigned int out_event;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return -EINVAL;

	/* We need to grab a reference to the event channel we are going to use
	 * to send the notify before releasing the reference we may already have
	 * (if someone has called this ioctl twice). This is required so that
	 * it is possible to change the clear_byte part of the notification
	 * without disturbing the event channel part, which may now be the last
	 * reference to that event channel.
	 */
	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
		if (evtchn_get(op.event_channel_port))
			return -EINVAL;
	}

	out_flags = op.action;
	out_event = op.event_channel_port;

	spin_lock(&priv->lock);

	list_for_each_entry(map, &priv->maps, next) {
		uint64_t begin = map->index << PAGE_SHIFT;
		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
		if (op.index >= begin && op.index < end)
			goto found;
	}
	rc = -ENOENT;
	goto unlock_out;

 found:
	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
	    (map->flags & GNTMAP_readonly)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	out_flags = map->notify.flags;
	out_event = map->notify.event;

	map->notify.flags = op.action;
	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
	map->notify.event = op.event_channel_port;

	rc = 0;

 unlock_out:
	spin_unlock(&priv->lock);

	/* Drop the reference to the event channel we did not save in the map */
	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
		evtchn_put(out_event);

	return rc;
}

static long gntdev_ioctl(struct file *flip,
			 unsigned int cmd, unsigned long arg)
{
	struct gntdev_priv *priv = flip->private_data;
	void __user *ptr = (void __user *)arg;

	switch (cmd) {
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		return gntdev_ioctl_map_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		return gntdev_ioctl_unmap_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		return gntdev_ioctl_notify(priv, ptr);

	default:
		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
}
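
/* mmap() handler: looks up the grant_map previously created by the map
 * ioctl at this file offset, then either rewrites the user PTEs through
 * apply_to_page_range() (PV) or inserts the granted pages directly with
 * vm_insert_page() (auto-translated domains). */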

static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
	struct gntdev_priv *priv = flip->private_data;
	int index = vma->vm_pgoff;
	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	struct grant_map *map;
	int i, err = -EINVAL;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
		 index, count, vma->vm_start, vma->vm_pgoff);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, index, count);
	if (!map)
		goto unlock_out;
	if (use_ptemod && map->vma)
		goto unlock_out;
	if (use_ptemod && priv->mm != vma->vm_mm) {
		printk(KERN_WARNING "Huh? Other mm?\n");
		goto unlock_out;
	}

	atomic_inc(&map->users);

	vma->vm_ops = &gntdev_vmops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	if (use_ptemod)
		vma->vm_flags |= VM_DONTCOPY;

	vma->vm_private_data = map;

	if (use_ptemod)
		map->vma = vma;

	if (map->flags) {
		if ((vma->vm_flags & VM_WRITE) &&
		    (map->flags & GNTMAP_readonly))
			goto out_unlock_put;
	} else {
		map->flags = GNTMAP_host_map;
		if (!(vma->vm_flags & VM_WRITE))
			map->flags |= GNTMAP_readonly;
	}

	spin_unlock(&priv->lock);

	if (use_ptemod) {
		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
					  vma->vm_end - vma->vm_start,
					  find_grant_ptes, map);
		if (err) {
			printk(KERN_WARNING "find_grant_ptes() failure.\n");
			goto out_put_map;
		}
	}

	err = map_grant_pages(map);
	if (err)
		goto out_put_map;

	if (!use_ptemod) {
		for (i = 0; i < count; i++) {
			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
					     map->pages[i]);
			if (err)
				goto out_put_map;
		}
	}

	return 0;

unlock_out:
	spin_unlock(&priv->lock);
	return err;

out_unlock_put:
	spin_unlock(&priv->lock);
out_put_map:
	if (use_ptemod)
		map->vma = NULL;
	gntdev_put_map(priv, map);
	return err;
}

static const struct file_operations gntdev_fops = {
	.owner = THIS_MODULE,
	.open = gntdev_open,
	.release = gntdev_release,
	.mmap = gntdev_mmap,
	.unlocked_ioctl = gntdev_ioctl
};

static struct miscdevice gntdev_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "xen/gntdev",
	.fops         = &gntdev_fops,
};

/* ------------------------------------------------------------------ */

static int __init gntdev_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	use_ptemod = xen_pv_domain();

	err = misc_register(&gntdev_miscdev);
	if (err != 0) {
		printk(KERN_ERR "Could not register gntdev device\n");
		return err;
	}
	return 0;
}

static void __exit gntdev_exit(void)
{
	misc_deregister(&gntdev_miscdev);
}

module_init(gntdev_init);
module_exit(gntdev_exit);

/* ------------------------------------------------------------------ */