/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *	Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "vfio_pci_private.h"
/*
 * IRQfd - generic
 */
struct virqfd {
	struct vfio_pci_device	*vdev;
	struct eventfd_ctx	*eventfd;
	int			(*handler)(struct vfio_pci_device *, void *);
	void			(*thread)(struct vfio_pci_device *, void *);
	void			*data;
	struct work_struct	inject;
	wait_queue_t		wait;
	poll_table		pt;
	struct work_struct	shutdown;
	struct virqfd		**pvirqfd;
};

static struct workqueue_struct *vfio_irqfd_cleanup_wq;
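/*
 * Life cycle of a virqfd, as implemented below: one virqfd wraps one
 * eventfd.  The "inject" work item runs ->thread() in process context
 * when the eventfd fires, and the "shutdown" work item tears the virqfd
 * down from the dedicated cleanup workqueue, so release never runs from
 * the waitqueue callback that notices the eventfd closing.
 */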
int __init vfio_pci_virqfd_init(void)
{
	vfio_irqfd_cleanup_wq =
		create_singlethread_workqueue("vfio-irqfd-cleanup");
	if (!vfio_irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void vfio_pci_virqfd_exit(void)
{
	destroy_workqueue(vfio_irqfd_cleanup_wq);
}
static void virqfd_deactivate(struct virqfd *virqfd)
{
	queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
}
static int virqfd_wakeup(wait_queue_t *wait, unsigned mode,
			 int sync, void *key)
{
	struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
	unsigned long flags = (unsigned long)key;

	if (flags & POLLIN) {
		/* An event has been signaled, call function */
		if ((!virqfd->handler ||
		     virqfd->handler(virqfd->vdev, virqfd->data)) &&
		    virqfd->thread)
			schedule_work(&virqfd->inject);
	}

	if (flags & POLLHUP) {
		unsigned long flags;
		spin_lock_irqsave(&virqfd->vdev->irqlock, flags);

		/*
		 * The eventfd is closing, if the virqfd has not yet been
		 * queued for release, as determined by testing whether the
		 * vdev pointer to it is still valid, queue it now.  As
		 * with kvm irqfds, we know we won't race against the virqfd
		 * going away because we hold wqh->lock to get here.
		 */
		if (*(virqfd->pvirqfd) == virqfd) {
			*(virqfd->pvirqfd) = NULL;
			virqfd_deactivate(virqfd);
		}

		spin_unlock_irqrestore(&virqfd->vdev->irqlock, flags);
	}

	return 0;
}
static void virqfd_ptable_queue_proc(struct file *file,
				     wait_queue_head_t *wqh, poll_table *pt)
{
	struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
	add_wait_queue(wqh, &virqfd->wait);
}
static void virqfd_shutdown(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
	u64 cnt;

	eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
	flush_work(&virqfd->inject);
	eventfd_ctx_put(virqfd->eventfd);

	kfree(virqfd);
}
static void virqfd_inject(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, inject);
	if (virqfd->thread)
		virqfd->thread(virqfd->vdev, virqfd->data);
}
static int virqfd_enable(struct vfio_pci_device *vdev,
			 int (*handler)(struct vfio_pci_device *, void *),
			 void (*thread)(struct vfio_pci_device *, void *),
			 void *data, struct virqfd **pvirqfd, int fd)
{
	struct file *file = NULL;
	struct eventfd_ctx *ctx = NULL;
	struct virqfd *virqfd;
	int ret = 0;
	unsigned int events;

	virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
	if (!virqfd)
		return -ENOMEM;

	virqfd->pvirqfd = pvirqfd;
	virqfd->vdev = vdev;
	virqfd->handler = handler;
	virqfd->thread = thread;
	virqfd->data = data;

	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
	INIT_WORK(&virqfd->inject, virqfd_inject);

	file = eventfd_fget(fd);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto fail;
	}

	ctx = eventfd_ctx_fileget(file);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto fail;
	}

	virqfd->eventfd = ctx;

	/*
	 * virqfds can be released by closing the eventfd or directly
	 * through ioctl.  These are both done through a workqueue, so
	 * we update the pointer to the virqfd under lock to avoid
	 * pushing multiple jobs to release the same virqfd.
	 */
	spin_lock_irq(&vdev->irqlock);

	if (*pvirqfd) {
		spin_unlock_irq(&vdev->irqlock);
		ret = -EBUSY;
		goto fail;
	}
	*pvirqfd = virqfd;

	spin_unlock_irq(&vdev->irqlock);

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
	init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);

	events = file->f_op->poll(file, &virqfd->pt);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered and trigger it as if we didn't miss it.
	 */
	if (events & POLLIN) {
		if ((!handler || handler(vdev, data)) && thread)
			schedule_work(&virqfd->inject);
	}

	/*
	 * Do not drop the file until the irqfd is fully initialized,
	 * otherwise we might race against the POLLHUP.
	 */
	fput(file);

	return 0;

fail:
	if (ctx && !IS_ERR(ctx))
		eventfd_ctx_put(ctx);

	if (file && !IS_ERR(file))
		fput(file);

	kfree(virqfd);

	return ret;
}
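/*
 * Typical wiring, matching the INTx unmask path later in this file:
 * the handler runs from the eventfd's waitqueue callback and decides
 * whether the thread function needs to be scheduled:
 *
 *	virqfd_enable(vdev, vfio_pci_intx_unmask_handler,
 *		      vfio_send_intx_eventfd, NULL,
 *		      &vdev->ctx[0].unmask, fd);
 */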
static void virqfd_disable(struct vfio_pci_device *vdev,
			   struct virqfd **pvirqfd)
{
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (*pvirqfd) {
		virqfd_deactivate(*pvirqfd);
		*pvirqfd = NULL;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	/*
	 * Block until we know all outstanding shutdown jobs have completed.
	 * Even if we don't queue the job, flush the wq to be sure it's
	 * been released.
	 */
	flush_workqueue(vfio_irqfd_cleanup_wq);
}
/*
 * INTx
 */
static void vfio_send_intx_eventfd(struct vfio_pci_device *vdev, void *unused)
{
	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}
void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
}
/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
int vfio_pci_intx_unmask_handler(struct vfio_pci_device *vdev, void *unused)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}
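/*
 * The >0 return is consumed two ways: vfio_pci_intx_unmask() below may
 * signal the eventfd directly since no waitqueue lock is held, while
 * the virqfd path returns it from the handler so that the eventfd is
 * signaled from the deferred inject work item instead.
 */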
void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}
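/*
 * Returning IRQ_NONE above tells the interrupt core that an assertion
 * on a shared line belonged to some other device; the interrupt is only
 * claimed and forwarded to the user when masking succeeded, either via
 * DisINTx or by disabling the exclusively-owned IRQ line.
 */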
static int vfio_intx_enable(struct vfio_pci_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here, non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}
static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	vdev->ctx[0].trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}
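/*
 * Note the irqflags selection above: DisINTx-capable (PCI 2.3) devices
 * can share the line because vfio_intx_handler() masks at the device,
 * while non-PCI-2.3 devices must own the IRQ exclusively (irqflags == 0)
 * since masking falls back to disabling the entire line.
 */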
static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
	vfio_intx_set_signal(vdev, -1);
	virqfd_disable(vdev, &vdev->ctx[0].unmask);
	virqfd_disable(vdev, &vdev->ctx[0].mask);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}
/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	if (msix) {
		int i;

		vdev->msix = kzalloc(nvec * sizeof(struct msix_entry),
				     GFP_KERNEL);
		if (!vdev->msix) {
			kfree(vdev->ctx);
			return -ENOMEM;
		}

		for (i = 0; i < nvec; i++)
			vdev->msix[i].entry = i;

		ret = pci_enable_msix(pdev, vdev->msix, nvec);
		if (ret) {
			kfree(vdev->msix);
			kfree(vdev->ctx);
			return ret;
		}
	} else {
		ret = pci_enable_msi_block(pdev, nvec);
		if (ret) {
			kfree(vdev->ctx);
			return ret;
		}
	}

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}
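/*
 * fls(nvec * 2 - 1) - 1 computes ceil(log2(nvec)), the encoding used by
 * the MSI Multiple Message Capable/Enable fields.  For example:
 * nvec = 1 -> fls(1) - 1 = 0, nvec = 3 -> fls(5) - 1 = 2 (rounds up to
 * 4 vectors), nvec = 8 -> fls(15) - 1 = 3.
 */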
static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;
	char *name = msix ? "vfio-msix" : "vfio-msi";
	struct eventfd_ctx *trigger;
	int ret;

	if (vector >= vdev->num_ctx)
		return -EINVAL;

	if (vdev->ctx[vector].trigger) {
		free_irq(irq, vdev->ctx[vector].trigger);
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "%s[%d](%s)",
					   name, vector, pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].trigger = trigger;

	return 0;
}
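/*
 * The trigger eventfd_ctx doubles as the request_irq() dev_id cookie:
 * vfio_msihandler() receives it directly and signals userspace without
 * any per-vector lookup, which is also why free_irq() above is passed
 * the old trigger when releasing a vector.
 */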
static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (--j; j >= (int)start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}
static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	for (i = 0; i < vdev->num_ctx; i++) {
		virqfd_disable(vdev, &vdev->ctx[i].unmask);
		virqfd_disable(vdev, &vdev->ctx[i].mask);
	}

	if (msix) {
		pci_disable_msix(vdev->pdev);
		kfree(vdev->msix);
	} else
		pci_disable_msi(pdev);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}
/*
 * IOCTL support
 */
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		if (fd >= 0)
			return virqfd_enable(vdev, vfio_pci_intx_unmask_handler,
					     vfio_send_intx_eventfd, NULL,
					     &vdev->ctx[0].unmask, fd);

		virqfd_disable(vdev, &vdev->ctx[0].unmask);
	}

	return 0;
}
static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}
static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}

	return 0;
}
static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}

	return 0;
}
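/*
 * As with the error IRQ below, the DATA_NONE/DATA_BOOL cases above give
 * userspace a loopback path: the vector's trigger eventfd is signaled as
 * if the device had fired, which is useful for interrupt-path testing.
 */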
static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int32_t fd = *(int32_t *)data;
	struct pci_dev *pdev = vdev->pdev;

	if ((index != VFIO_PCI_ERR_IRQ_INDEX) ||
	    !(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
		return -EINVAL;

	/*
	 * device_lock synchronizes setting and checking of
	 * err_trigger. The vfio_pci_aer_err_detected() is also
	 * called with device_lock held.
	 */

	/* DATA_NONE/DATA_BOOL enables loopback testing */

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		device_lock(&pdev->dev);
		if (vdev->err_trigger)
			eventfd_signal(vdev->err_trigger, 1);
		device_unlock(&pdev->dev);
		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		device_lock(&pdev->dev);
		if (trigger && vdev->err_trigger)
			eventfd_signal(vdev->err_trigger, 1);
		device_unlock(&pdev->dev);
		return 0;
	}

	/* Handle SET_DATA_EVENTFD */

	if (fd == -1) {
		device_lock(&pdev->dev);
		if (vdev->err_trigger)
			eventfd_ctx_put(vdev->err_trigger);
		vdev->err_trigger = NULL;
		device_unlock(&pdev->dev);
		return 0;
	} else if (fd >= 0) {
		struct eventfd_ctx *efdctx;
		efdctx = eventfd_ctx_fdget(fd);
		if (IS_ERR(efdctx))
			return PTR_ERR(efdctx);
		device_lock(&pdev->dev);
		if (vdev->err_trigger)
			eventfd_ctx_put(vdev->err_trigger);
		vdev->err_trigger = efdctx;
		device_unlock(&pdev->dev);
		return 0;
	} else
		return -EINVAL;
}
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}
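/*
 * For reference, a minimal sketch (not part of this driver) of how
 * userspace reaches vfio_pci_set_irqs_ioctl(), assuming an already-open
 * VFIO device fd and one eventfd for MSI vector 0.  The variable-length
 * data[] member carries the int32_t eventfd selected by
 * VFIO_IRQ_SET_DATA_EVENTFD:
 *
 *	struct vfio_irq_set *set;
 *	size_t argsz = sizeof(*set) + sizeof(int32_t);
 *
 *	set = malloc(argsz);
 *	set->argsz = argsz;
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *		     VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSI_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 1;
 *	*(int32_t *)&set->data = eventfd(0, EFD_CLOEXEC);
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 */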