/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
						  unsigned int *count)
{
	return sg_next(sg);
}

static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
					      unsigned int *count)
{
	if (--(*count) == 0)
		return NULL;
	return sg + 1;
}

/* Set up an indirect table of descriptors and add it to the queue. */
static inline int vring_add_indirect(struct vring_virtqueue *vq,
				     struct scatterlist *sgs[],
				     struct scatterlist *(*next)
				       (struct scatterlist *, unsigned int *),
				     unsigned int total_sg,
				     unsigned int total_out,
				     unsigned int total_in,
				     unsigned int out_sgs,
				     unsigned int in_sgs,
				     gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	struct scatterlist *sg;
	int i, n;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg lists into the indirect page */
	i = 0;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			desc[i].flags = VRING_DESC_F_NEXT;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	BUG_ON(i != total_sg);

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->vq.num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				struct scatterlist *(*next)
				  (struct scatterlist *, unsigned int *),
				unsigned int total_out,
				unsigned int total_in,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	unsigned int i, n, avail, uninitialized_var(prev), total_sg;
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
				> 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	total_sg = total_in + total_out;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
		head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
					  total_in,
					  out_sgs, in_sgs, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	if (vq->vq.num_free < total_sg) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 total_sg, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= total_sg;

	head = i = vq->free_head;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;
}

/**
 * virtqueue_add_buf - expose buffer to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out: the number of sg entries readable by other side
 * @in: the number of sg entries which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_buf(struct virtqueue *_vq,
		      struct scatterlist sg[],
		      unsigned int out,
		      unsigned int in,
		      void *data,
		      gfp_t gfp)
{
	struct scatterlist *sgs[2];

	sgs[0] = sg;
	sgs[1] = sg + out;

	return virtqueue_add(_vq, sgs, sg_next_arr,
			     out, in, out ? 1 : 0, in ? 1 : 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);

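/*
 * Illustrative usage of virtqueue_add_buf() (a hedged sketch: "my_vq",
 * "req", "resp" and "token" are hypothetical driver state, not part of
 * this file).  The first @out entries are read-only for the other side,
 * the following @in entries are written by it:
 *
 *	struct scatterlist sg[2];
 *	int err;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], req, sizeof(*req));    (host reads the request)
 *	sg_set_buf(&sg[1], resp, sizeof(*resp));  (host writes the reply)
 *	err = virtqueue_add_buf(my_vq, sg, 1, 1, token, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(my_vq);
 */
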
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_out, total_in;

	/* Count them first. */
	for (i = total_out = total_in = 0; i < out_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_out++;
	}
	for (; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_in++;
	}
	return virtqueue_add(_vq, sgs, sg_next_chained,
			     total_out, total_in, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);

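/*
 * Illustrative usage of virtqueue_add_sgs() (a hedged sketch; "cmd",
 * "buf", "stat" and "my_vq" are hypothetical).  Each array element is a
 * separately terminated scatterlist, e.g. a command header and a data
 * buffer going out, plus a status byte coming back:
 *
 *	struct scatterlist hdr, data, status;
 *	struct scatterlist *sgs[3] = { &hdr, &data, &status };
 *
 *	sg_init_one(&hdr, &cmd, sizeof(cmd));
 *	sg_init_one(&data, buf, buf_len);
 *	sg_init_one(&status, &stat, sizeof(stat));
 *	err = virtqueue_add_sgs(my_vq, sgs, 2, 1, token, GFP_ATOMIC);
 */
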
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (need not be terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist sg[], unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

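/*
 * Illustrative usage of virtqueue_add_outbuf() (a hedged sketch; "pkt"
 * and "my_vq" are hypothetical).  Because @num is passed explicitly,
 * the scatterlist need not be terminated:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, pkt, pkt_len);
 *	err = virtqueue_add_outbuf(my_vq, &sg, 1, pkt, GFP_ATOMIC);
 */
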
/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (need not be terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist sg[], unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

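/*
 * Illustrative usage of virtqueue_add_inbuf() (a hedged sketch; "my_vq"
 * and RX_BUF_LEN are hypothetical).  A typical receive path posts empty
 * buffers for the other side to fill, using the buffer as its own token:
 *
 *	struct scatterlist sg;
 *	void *buf = kmalloc(RX_BUF_LEN, GFP_KERNEL);
 *
 *	if (buf) {
 *		sg_init_one(&sg, buf, RX_BUF_LEN);
 *		err = virtqueue_add_inbuf(my_vq, &sg, 1, buf, GFP_KERNEL);
 *	}
 */
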
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

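/*
 * Illustrative split-kick usage (a hedged sketch; "my_lock", "my_vq"
 * and the surrounding driver are hypothetical).  Only the prepare step
 * needs the caller's serialization; the notify, which may be an
 * expensive exit to the host, can run after the lock is dropped:
 *
 *	unsigned long flags;
 *	bool kick;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	err = virtqueue_add_outbuf(my_vq, &sg, 1, token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(my_vq);
 *	spin_unlock_irqrestore(&my_lock, flags);
 *	if (kick)
 *		virtqueue_notify(my_vq);
 */
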
/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_buf().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

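/*
 * Illustrative usage of virtqueue_get_buf() (a hedged sketch;
 * "my_complete" is a hypothetical completion handler).  Drain all used
 * buffers and hand each token back to the driver:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(my_vq, &len)) != NULL)
 *		my_complete(token, len);
 */
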
/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = vq->last_used_idx;
	virtio_mb(vq->weak_barriers);
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

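/*
 * Illustrative callback-handling loop (a hedged sketch; the names are
 * hypothetical).  The return value of virtqueue_enable_cb() closes the
 * race between draining the ring and re-enabling interrupts:
 *
 *	for (;;) {
 *		virtqueue_disable_cb(my_vq);
 *		while ((token = virtqueue_get_buf(my_vq, &len)))
 *			my_complete(token, len);
 *		if (virtqueue_enable_cb(my_vq))
 *			break;	(nothing slipped in meanwhile: done)
 *	}
 */
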
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

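/*
 * Illustrative shutdown usage (a hedged sketch, assuming the "data"
 * tokens were the kmalloc'ed buffers themselves, as in the receive
 * example above):
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(my_vq)) != NULL)
 *		kfree(buf);
 */
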
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

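/*
 * Illustrative transport wiring (a hedged sketch; "my_irq" is a
 * hypothetical interrupt line).  vring_interrupt() matches the
 * irq_handler_t signature, so a transport can register it directly,
 * passing the struct virtqueue as dev_id:
 *
 *	err = request_irq(my_irq, vring_interrupt, IRQF_SHARED,
 *			  "my-virtqueue", my_vq);
 */
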
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

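/*
 * Illustrative transport-side setup (a hedged sketch; "my_vdev",
 * "my_notify" and "my_callback" are hypothetical).  The ring memory
 * must be physically contiguous, zeroed, and aligned to @vring_align;
 * vring_size() gives the number of bytes needed:
 *
 *	unsigned int num = 256;
 *	void *pages = alloc_pages_exact(vring_size(num, PAGE_SIZE),
 *					GFP_KERNEL | __GFP_ZERO);
 *	struct virtqueue *vq = NULL;
 *
 *	if (pages)
 *		vq = vring_new_virtqueue(0, num, PAGE_SIZE, my_vdev,
 *					 true, pages, my_notify,
 *					 my_callback, "my-vq");
 */
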
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");