@@ ... @@
 #define queue_ring_for_each(i, q) \
 	for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)
 
-static void queue_release(struct dma_ops_domain *dom,
-			  struct flush_queue *queue)
-{
-	unsigned i;
-
-	assert_spin_locked(&queue->lock);
-
-	queue_ring_for_each(i, queue)
-		free_iova_fast(&dom->iovad,
-			       queue->entries[i].iova_pfn,
-			       queue->entries[i].pages);
-
-	queue->head = queue->tail = 0;
-}
-
 static inline unsigned queue_ring_add(struct flush_queue *queue)
 {
 	unsigned idx = queue->tail;
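
For context, queue_ring_free_flushed() (not shown in this excerpt) is what makes queue_release() redundant: it walks the ring from head and frees every IOVA range whose IOTLB flush has already completed, advancing head past the reclaimed slots. Below is a minimal user-space sketch of that pattern; the per-entry counter field and the flush_finish_cnt counter are assumptions made for illustration and do not appear in the patch above.

/*
 * Sketch of the flush-queue reclaim pattern, compilable in user space.
 * entry->counter and flush_finish_cnt are assumed names, not taken
 * from the patch; free_iova_stub() stands in for free_iova_fast().
 */
#include <stdatomic.h>
#include <stdio.h>

#define FLUSH_QUEUE_SIZE 256

struct flush_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	unsigned long long counter;	/* assumed: flush count when queued */
};

struct flush_queue {
	unsigned head, tail;
	struct flush_entry entries[FLUSH_QUEUE_SIZE];
};

/* assumed: per-domain count of completed IOTLB flushes */
static atomic_ullong flush_finish_cnt;

#define queue_ring_for_each(i, q) \
	for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)

static void free_iova_stub(unsigned long iova_pfn, unsigned long pages)
{
	printf("free pfn 0x%lx (%lu pages)\n", iova_pfn, pages);
}

/*
 * Free every entry queued before the last completed flush; entries at
 * or after the current flush count are still live in the IOTLB and
 * must stay in the ring.
 */
static void queue_ring_free_flushed(struct flush_queue *queue)
{
	unsigned long long finished = atomic_load(&flush_finish_cnt);
	unsigned i;

	queue_ring_for_each(i, queue) {
		if (queue->entries[i].counter >= finished)
			break;
		free_iova_stub(queue->entries[i].iova_pfn,
			       queue->entries[i].pages);
	}

	queue->head = i;	/* everything before i is reclaimed */
}

int main(void)
{
	struct flush_queue q = { .head = 0, .tail = 0 };

	/* queue three ranges, tagged with flush counts 0, 0 and 1 */
	for (unsigned n = 0; n < 3; n++) {
		q.entries[q.tail] = (struct flush_entry){
			.iova_pfn = 0x1000u + n,
			.pages    = 1,
			.counter  = n / 2,
		};
		q.tail = (q.tail + 1) % FLUSH_QUEUE_SIZE;
	}

	atomic_store(&flush_finish_cnt, 1);	/* one flush has completed */
	queue_ring_free_flushed(&q);		/* frees the two counter==0 entries */
	return 0;
}

In the patch the call is made with queue->lock held (taken just below via spin_lock_irqsave()), so the walk and the head update are serialized against concurrent producers on the same per-CPU queue.
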
@@ ... @@ static void queue_add(struct dma_ops_domain *dom,
 	queue = get_cpu_ptr(dom->flush_queue);
 	spin_lock_irqsave(&queue->lock, flags);
 
-	queue_ring_free_flushed(dom, queue);
-
-	if (queue_ring_full(queue)) {
+	/*
+	 * When ring-queue is full, flush the entries from the IOTLB so
+	 * that we can free all entries with queue_ring_free_flushed()
+	 * below.
+	 */
+	if (queue_ring_full(queue))
 		dma_ops_domain_flush_tlb(dom);
-		queue_release(dom, queue);
-	}
+
+	queue_ring_free_flushed(dom, queue);
 
 	idx = queue_ring_add(queue);
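
The hunk above is, in effect, a reordering: on a full ring, flush the IOTLB first, then let a single queue_ring_free_flushed() pass reclaim everything. That works because a completed domain flush retires every entry currently queued, so the reclaim walk runs all the way to tail and leaves head == tail, exactly the state queue_release() used to produce by hand. A compressed sketch of that equivalence, under the same assumed counter scheme as in the previous sketch:

/*
 * Why flush-then-reclaim empties the ring: a completed flush moves
 * the finish counter past every queued entry. The counter names are
 * assumptions, not part of this patch.
 */
#include <assert.h>

#define FLUSH_QUEUE_SIZE 8

struct ring {
	unsigned head, tail;
	unsigned long long counter[FLUSH_QUEUE_SIZE];
};

static unsigned long long flush_start_cnt, flush_finish_cnt;

/* stand-in for dma_ops_domain_flush_tlb(): completes all pending flushes */
static void flush_tlb(void)
{
	flush_finish_cnt = ++flush_start_cnt;
}

/* same reclaim walk as queue_ring_free_flushed() in the sketch above */
static void free_flushed(struct ring *r)
{
	unsigned i;

	for (i = r->head; i != r->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)
		if (r->counter[i] >= flush_finish_cnt)
			break;
	r->head = i;
}

int main(void)
{
	struct ring r = { .head = 0, .tail = FLUSH_QUEUE_SIZE - 1 };

	for (unsigned i = 0; i < FLUSH_QUEUE_SIZE - 1; i++)
		r.counter[i] = flush_start_cnt;	/* queued before any flush */

	flush_tlb();		/* finish counter now exceeds every entry */
	free_flushed(&r);	/* ...so the walk reaches tail */

	assert(r.head == r.tail);	/* ring empty: what queue_release() did */
	return 0;
}

This is why the first hunk can delete queue_release() outright: its reset of head and tail falls out of the ordinary reclaim path once the full-ring case flushes before reclaiming.
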