struct flush_queue {
	struct flush_queue_entry *entries;
	unsigned head, tail;
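+	/* protects 'head', 'tail' and the 'entries' ring against concurrent updates */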
+	spinlock_t lock;
};
			dma_ops_domain_free_flush_queue(dom);
			return -ENOMEM;
		}
+
+		spin_lock_init(&queue->lock);
	}

	return 0;
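
/*
 * The ring helpers below are only safe with queue->lock held; the
 * assert_spin_locked() calls added by this patch make that explicit.
 */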
static inline bool queue_ring_full(struct flush_queue *queue)
{
+	assert_spin_locked(&queue->lock);
+
	return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head);
}
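
/*
 * Ring layout: entries between 'head' and 'tail' are queued ranges whose
 * IOVAs have not been released yet; the ring counts as full when advancing
 * 'tail' would catch up with 'head'.
 */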
static void queue_ring_free_flushed(struct dma_ops_domain *dom,
				    struct flush_queue *queue)
{
	unsigned i;

+	assert_spin_locked(&queue->lock);
+
	queue_ring_for_each(i, queue)
		free_iova_fast(&dom->iovad,
			       queue->entries[i].iova_pfn,
			       queue->entries[i].pages);
static inline unsigned queue_ring_add(struct flush_queue *queue)
{
	unsigned idx = queue->tail;

+	assert_spin_locked(&queue->lock);
+
	queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;

	return idx;
static void queue_add(struct dma_ops_domain *dom,
		      unsigned long address, unsigned long pages)
{
	struct flush_queue *queue;
+	unsigned long flags;
	int idx;

	pages     = __roundup_pow_of_two(pages);
	address >>= PAGE_SHIFT;

	queue = get_cpu_ptr(dom->flush_queue);
+	spin_lock_irqsave(&queue->lock, flags);
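+	/*
+	 * Even though the queue is per-CPU, the ring is only ever updated
+	 * with queue->lock held (presumably so it can also be drained from
+	 * outside this path); keep everything below under the lock.
+	 */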
	if (queue_ring_full(queue)) {
		domain_flush_tlb(&dom->domain);
		domain_flush_complete(&dom->domain);
		queue_ring_free_flushed(dom, queue);
	}

	idx = queue_ring_add(queue);
	queue->entries[idx].iova_pfn = address;
	queue->entries[idx].pages    = pages;

+	spin_unlock_irqrestore(&queue->lock, flags);
	put_cpu_ptr(dom->flush_queue);
}
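
/*
 * Illustrative sketch, not part of the patch above: with queue->lock in
 * place, a drain path running outside queue_add() could flush the domain
 * and release all queued IOVA ranges of one ring. The helper name
 * queue_ring_drain() is hypothetical; it only combines the locking shown
 * above with the existing flush helpers.
 */
static void queue_ring_drain(struct dma_ops_domain *dom,
			     struct flush_queue *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);

	/* Make sure queued ranges are flushed before their IOVAs are reused */
	domain_flush_tlb(&dom->domain);
	domain_flush_complete(&dom->domain);
	queue_ring_free_flushed(dom, queue);

	spin_unlock_irqrestore(&queue->lock, flags);
}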