/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *	Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

#include "vfio_pci_private.h"

/*
 * IRQfd - generic
 */
struct virqfd {
	struct vfio_pci_device	*vdev;
	struct eventfd_ctx	*eventfd;
	int			(*handler)(struct vfio_pci_device *, void *);
	void			(*thread)(struct vfio_pci_device *, void *);
	void			*data;
	struct work_struct	inject;
	wait_queue_t		wait;
	poll_table		pt;
	struct work_struct	shutdown;
	struct virqfd		**pvirqfd;
};

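/*
 * Lifecycle summary: handler runs from the eventfd wakeup callback in
 * atomic context and may return nonzero to defer work to thread, which
 * runs from the inject work item.  The pvirqfd back-pointer lets the
 * POLLHUP path and virqfd_disable() agree, under vdev->irqlock, on
 * which of them queues the shutdown work, so the virqfd is only ever
 * released once.
 */
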
static struct workqueue_struct *vfio_irqfd_cleanup_wq;

int __init vfio_pci_virqfd_init(void)
{
	vfio_irqfd_cleanup_wq =
		create_singlethread_workqueue("vfio-irqfd-cleanup");
	if (!vfio_irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void vfio_pci_virqfd_exit(void)
{
	destroy_workqueue(vfio_irqfd_cleanup_wq);
}

static void virqfd_deactivate(struct virqfd *virqfd)
{
	queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
}

static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
	unsigned long flags = (unsigned long)key;

	if (flags & POLLIN) {
		/* An event has been signaled, call function */
		if ((!virqfd->handler ||
		     virqfd->handler(virqfd->vdev, virqfd->data)) &&
		    virqfd->thread)
			schedule_work(&virqfd->inject);
	}

	if (flags & POLLHUP) {
		unsigned long flags;
		spin_lock_irqsave(&virqfd->vdev->irqlock, flags);

		/*
		 * The eventfd is closing, if the virqfd has not yet been
		 * queued for release, as determined by testing whether the
		 * vdev pointer to it is still valid, queue it now.  As
		 * with kvm irqfds, we know we won't race against the virqfd
		 * going away because we hold wqh->lock to get here.
		 */
		if (*(virqfd->pvirqfd) == virqfd) {
			*(virqfd->pvirqfd) = NULL;
			virqfd_deactivate(virqfd);
		}

		spin_unlock_irqrestore(&virqfd->vdev->irqlock, flags);
	}

	return 0;
}

static void virqfd_ptable_queue_proc(struct file *file,
				     wait_queue_head_t *wqh, poll_table *pt)
{
	struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
	add_wait_queue(wqh, &virqfd->wait);
}

static void virqfd_shutdown(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
	u64 cnt;

	eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
	flush_work(&virqfd->inject);
	eventfd_ctx_put(virqfd->eventfd);

	kfree(virqfd);
}

static void virqfd_inject(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, inject);
	if (virqfd->thread)
		virqfd->thread(virqfd->vdev, virqfd->data);
}

static int virqfd_enable(struct vfio_pci_device *vdev,
			 int (*handler)(struct vfio_pci_device *, void *),
			 void (*thread)(struct vfio_pci_device *, void *),
			 void *data, struct virqfd **pvirqfd, int fd)
{
	struct file *file = NULL;
	struct eventfd_ctx *ctx = NULL;
	struct virqfd *virqfd;
	int ret = 0;
	unsigned int events;

	virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
	if (!virqfd)
		return -ENOMEM;

	virqfd->pvirqfd = pvirqfd;
	virqfd->vdev = vdev;
	virqfd->handler = handler;
	virqfd->thread = thread;
	virqfd->data = data;

	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
	INIT_WORK(&virqfd->inject, virqfd_inject);

	file = eventfd_fget(fd);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto fail;
	}

	ctx = eventfd_ctx_fileget(file);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto fail;
	}

	virqfd->eventfd = ctx;

	/*
	 * virqfds can be released by closing the eventfd or directly
	 * through ioctl.  These are both done through a workqueue, so
	 * we update the pointer to the virqfd under lock to avoid
	 * pushing multiple jobs to release the same virqfd.
	 */
	spin_lock_irq(&vdev->irqlock);

	if (*pvirqfd) {
		spin_unlock_irq(&vdev->irqlock);
		ret = -EBUSY;
		goto fail;
	}
	*pvirqfd = virqfd;

	spin_unlock_irq(&vdev->irqlock);

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
	init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);

	events = file->f_op->poll(file, &virqfd->pt);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered and trigger it as if we didn't miss it.
	 */
	if (events & POLLIN) {
		if ((!handler || handler(vdev, data)) && thread)
			schedule_work(&virqfd->inject);
	}

	/*
	 * Do not drop the file until the irqfd is fully initialized,
	 * otherwise we might race against the POLLHUP.
	 */
	fput(file);

	return 0;

fail:
	if (ctx && !IS_ERR(ctx))
		eventfd_ctx_put(ctx);

	if (file && !IS_ERR(file))
		fput(file);

	kfree(virqfd);

	return ret;
}

static void virqfd_disable(struct vfio_pci_device *vdev,
			   struct virqfd **pvirqfd)
{
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (*pvirqfd) {
		virqfd_deactivate(*pvirqfd);
		*pvirqfd = NULL;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	/*
	 * Block until we know all outstanding shutdown jobs have completed.
	 * Even if we don't queue the job, flush the wq to be sure it's
	 * been released.
	 */
	flush_workqueue(vfio_irqfd_cleanup_wq);
}

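/*
 * Typical pairing (illustrative; mirrors the unmask wiring in
 * vfio_pci_set_intx_unmask() below):
 *
 *	ret = virqfd_enable(vdev, vfio_pci_intx_unmask_handler,
 *			    vfio_send_intx_eventfd, NULL,
 *			    &vdev->ctx[0].unmask, fd);
 *	...
 *	virqfd_disable(vdev, &vdev->ctx[0].unmask);
 *
 * virqfd_enable() fails with -EBUSY if *pvirqfd is already populated.
 */
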
/*
 * INTx
 */
static void vfio_send_intx_eventfd(struct vfio_pci_device *vdev, void *unused)
{
	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}

void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(struct vfio_pci_device *vdev,
					void *unused)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&	/* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}

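/*
 * Design note: for PCI 2.3 devices the INTx line may be shared, so the
 * handler above claims the interrupt only when
 * pci_check_and_mask_intx() confirms this device asserted it.  Pre-2.3
 * devices have no DisINTx support to check or mask at the device, so
 * the line is requested exclusively and masked at the irqchip via
 * disable_irq_nosync() instead.
 */
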
static int vfio_intx_enable(struct vfio_pci_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here, non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}

static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	vdev->ctx[0].trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
	vfio_intx_set_signal(vdev, -1);
	virqfd_disable(vdev, &vdev->ctx[0].unmask);
	virqfd_disable(vdev, &vdev->ctx[0].mask);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	if (msix) {
		int i;

		vdev->msix = kzalloc(nvec * sizeof(struct msix_entry),
				     GFP_KERNEL);
		if (!vdev->msix) {
			kfree(vdev->ctx);
			return -ENOMEM;
		}

		for (i = 0; i < nvec; i++)
			vdev->msix[i].entry = i;

		ret = pci_enable_msix(pdev, vdev->msix, nvec);
		if (ret) {
			kfree(vdev->msix);
			kfree(vdev->ctx);
			return ret;
		}
	} else {
		ret = pci_enable_msi_block(pdev, nvec);
		if (ret) {
			kfree(vdev->ctx);
			return ret;
		}
	}

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}

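/*
 * Worked example for the msi_qmax computation above: for nvec = 3,
 * fls(3 * 2 - 1) - 1 = fls(5) - 1 = 2, i.e. ceil(log2(3)), so the
 * virtual MSI capability advertises 2^2 = 4 vectors, the smallest
 * power of two covering the request.
 */
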
static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;
	char *name = msix ? "vfio-msix" : "vfio-msi";
	struct eventfd_ctx *trigger;
	int ret;

	if (vector >= vdev->num_ctx)
		return -EINVAL;

	if (vdev->ctx[vector].trigger) {
		free_irq(irq, vdev->ctx[vector].trigger);
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "%s[%d](%s)",
					   name, vector, pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].trigger = trigger;

	return 0;
}

static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (--j; j >= start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}

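/*
 * Note on the unwind above: the loop exits with j one past the failing
 * vector, so the backward pass starts at that vector; disabling it is
 * a harmless no-op since its signal was never installed.
 */
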
static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	for (i = 0; i < vdev->num_ctx; i++) {
		virqfd_disable(vdev, &vdev->ctx[i].unmask);
		virqfd_disable(vdev, &vdev->ctx[i].mask);
	}

	if (msix) {
		pci_disable_msix(vdev->pdev);
		kfree(vdev->msix);
	} else
		pci_disable_msi(pdev);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * IOCTL support
 */
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		if (fd >= 0)
			return virqfd_enable(vdev, vfio_pci_intx_unmask_handler,
					     vfio_send_intx_eventfd, NULL,
					     &vdev->ctx[0].unmask, fd);

		virqfd_disable(vdev, &vdev->ctx[0].unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}

	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}

	return 0;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	struct pci_dev *pdev = vdev->pdev;
	int32_t fd;

	if ((index != VFIO_PCI_ERR_IRQ_INDEX) ||
	    !(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
		return -EINVAL;

	/*
	 * device_lock synchronizes setting and checking of
	 * err_trigger.  The vfio_pci_aer_err_detected() is also
	 * called with device_lock held.
	 */

	/* DATA_NONE/DATA_BOOL enables loopback testing */

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		device_lock(&pdev->dev);
		if (vdev->err_trigger)
			eventfd_signal(vdev->err_trigger, 1);
		device_unlock(&pdev->dev);
		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		device_lock(&pdev->dev);
		if (trigger && vdev->err_trigger)
			eventfd_signal(vdev->err_trigger, 1);
		device_unlock(&pdev->dev);
		return 0;
	}

	/*
	 * Handle SET_DATA_EVENTFD; only dereference data here, since
	 * DATA_NONE carries no payload.
	 */
	fd = *(int32_t *)data;

	if (fd == -1) {
		device_lock(&pdev->dev);
		if (vdev->err_trigger)
			eventfd_ctx_put(vdev->err_trigger);
		vdev->err_trigger = NULL;
		device_unlock(&pdev->dev);
		return 0;
	} else if (fd >= 0) {
		struct eventfd_ctx *efdctx;
		efdctx = eventfd_ctx_fdget(fd);
		if (IS_ERR(efdctx))
			return PTR_ERR(efdctx);
		device_lock(&pdev->dev);
		if (vdev->err_trigger)
			eventfd_ctx_put(vdev->err_trigger);
		vdev->err_trigger = efdctx;
		device_unlock(&pdev->dev);
		return 0;
	} else
		return -EINVAL;
}

int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}

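/*
 * Userspace usage sketch (illustrative only, not part of this driver):
 * a caller reaches vfio_pci_set_irqs_ioctl() by packing an eventfd
 * into struct vfio_irq_set and issuing VFIO_DEVICE_SET_IRQS on the
 * device fd, e.g. to wire MSI vector 0 to an eventfd:
 *
 *	struct vfio_irq_set *set;
 *	int32_t *pfd;
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	set = malloc(sizeof(*set) + sizeof(int32_t));
 *	set->argsz = sizeof(*set) + sizeof(int32_t);
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *		     VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSI_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 1;
 *	pfd = (int32_t *)&set->data;
 *	*pfd = efd;
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 */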