/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * These "next" callbacks let virtqueue_add() walk either a properly
 * terminated (chained) scatterlist or a plain array whose length the
 * caller tracks in *count.
 */
static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
						  unsigned int *count)
{
	return sg_next(sg);
}

static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
					      unsigned int *count)
{
	if (--(*count) == 0)
		return NULL;
	return sg + 1;
}

/* Set up an indirect table of descriptors and add it to the queue. */
static inline int vring_add_indirect(struct vring_virtqueue *vq,
				     struct scatterlist *sgs[],
				     struct scatterlist *(*next)
				       (struct scatterlist *, unsigned int *),
				     unsigned int total_sg,
				     unsigned int total_out,
				     unsigned int total_in,
				     unsigned int out_sgs,
				     unsigned int in_sgs,
				     gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	struct scatterlist *sg;
	int i, n;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg lists into the indirect page */
	i = 0;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			desc[i].flags = VRING_DESC_F_NEXT;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	BUG_ON(i != total_sg);

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->vq.num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				struct scatterlist *(*next)
				  (struct scatterlist *, unsigned int *),
				unsigned int total_out,
				unsigned int total_in,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	unsigned int i, n, avail, uninitialized_var(prev), total_sg;
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	total_sg = total_in + total_out;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
		head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
					  total_in,
					  out_sgs, in_sgs, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	if (vq->vq.num_free < total_sg) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 total_sg, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= total_sg;

	head = i = vq->free_head;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;
}

/**
 * virtqueue_add_buf - expose buffer to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out: the number of sg entries readable by other side
 * @in: the number of sg entries which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_buf(struct virtqueue *_vq,
		      struct scatterlist sg[],
		      unsigned int out,
		      unsigned int in,
		      void *data,
		      gfp_t gfp)
{
	struct scatterlist *sgs[2];

	sgs[0] = sg;
	sgs[1] = sg + out;

	return virtqueue_add(_vq, sgs, sg_next_arr,
			     out, in, out ? 1 : 0, in ? 1 : 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);
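
/*
 * Example usage (a sketch, not from a real driver): queue a request
 * with one readable part (a header the device consumes) followed by
 * one writable part (a status byte the device fills in).  "req",
 * "hdr" and "status" are hypothetical driver-side names:
 *
 *	struct scatterlist sg[2];
 *	int err;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], &req->hdr, sizeof(req->hdr));
 *	sg_set_buf(&sg[1], &req->status, sizeof(req->status));
 *	err = virtqueue_add_buf(vq, sg, 1, 1, req, GFP_ATOMIC);
 *	if (err < 0)
 *		return err;
 *	virtqueue_kick(vq);
 */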

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_out, total_in;

	/* Count them first. */
	for (i = total_out = total_in = 0; i < out_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_out++;
	}
	for (; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_in++;
	}
	return virtqueue_add(_vq, sgs, sg_next_chained,
			     total_out, total_in, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
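
/*
 * Example usage (a sketch): the same request as the virtqueue_add_buf()
 * example above, but built from two separately terminated scatterlists.
 * "req" is again a hypothetical driver structure:
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[2];
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 */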

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (need not be terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist sg[], unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (need not be terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist sg[], unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
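
/*
 * Example usage (a sketch): posting a single empty receive buffer, as
 * a network driver might when refilling its rx queue.  "buf" and
 * "len" are hypothetical:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	err = virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */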

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
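
/*
 * Example usage (a sketch): batch several adds under a driver lock but
 * issue the potentially expensive notification (often an exit to the
 * hypervisor) outside it.  "lock" is a hypothetical driver spinlock:
 *
 *	unsigned long flags;
 *	bool kick;
 *
 *	spin_lock_irqsave(&lock, flags);
 *	... one or more virtqueue_add_buf() calls ...
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */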

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_buf().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
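
/*
 * Example usage (a sketch): draining completed buffers, typically from
 * the virtqueue callback.  "complete_request" is a hypothetical driver
 * helper that finishes the request identified by the token:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 */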

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = vq->last_used_idx;
	virtio_mb(vq->weak_barriers);
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
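
/*
 * Example usage (a sketch): the canonical poll-then-re-enable loop.
 * The enable_cb return value closes the race where a buffer is used
 * between the final get_buf and callbacks being re-armed.
 * "complete_request" is the same hypothetical helper as above:
 *
 *	for (;;) {
 *		virtqueue_disable_cb(vq);
 *		while ((token = virtqueue_get_buf(vq, &len)))
 *			complete_request(token, len);
 *		if (virtqueue_enable_cb(vq))
 *			break;
 *	}
 */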

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
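
/*
 * Example usage (a sketch): a transport wiring a device interrupt
 * straight to the ring.  "irq" and the flags choice are hypothetical,
 * not mandated by this file:
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED,
 *			  "virtio-ring", vq);
 */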

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
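
/*
 * Example usage (a sketch): a transport allocating ring memory and
 * creating the virtqueue.  "vdev", "num", "my_notify" and
 * "my_callback" are hypothetical, and num must be a power of 2:
 *
 *	void *pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 *				get_order(vring_size(num, PAGE_SIZE)));
 *	struct virtqueue *vq = vring_new_virtqueue(0, num, PAGE_SIZE,
 *				vdev, true, pages, my_notify,
 *				my_callback, "requests");
 */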

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");