        fq = per_cpu_ptr(iovad->fq, cpu);
        fq->head = 0;
        fq->tail = 0;
+
+       spin_lock_init(&fq->lock);
    }

    return 0;
}
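For reference, the per-CPU queue being initialized here would need a layout along these lines. This is a sketch inferred from the fields the patch touches, not the actual definition (which lives in the iova header), and the IOVA_FQ_SIZE value is assumed:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define IOVA_FQ_SIZE 256    /* assumed; the real constant is defined elsewhere */

    /* Sketch of the flush-queue layout implied by this patch */
    struct iova_fq_entry {
        unsigned long iova_pfn; /* start of the deferred-free IOVA range */
        unsigned long pages;    /* length of the range in pages */
        unsigned long data;     /* cookie passed to the entry destructor */
        u64 counter;            /* fq_flush_start_cnt when the entry was queued */
    };

    struct iova_fq {
        struct iova_fq_entry entries[IOVA_FQ_SIZE];
        unsigned head, tail;    /* ring-buffer indices into entries[] */
        spinlock_t lock;        /* new in this patch: serializes ring access */
    };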
static inline bool fq_full(struct iova_fq *fq)
{
+   assert_spin_locked(&fq->lock);
    return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}
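fq_full() relies on the classic one-slot-open ring convention: the ring counts as full when advancing tail would make it equal to head, so each per-CPU queue holds at most IOVA_FQ_SIZE - 1 entries. A standalone model of the arithmetic (IOVA_FQ_SIZE value assumed for illustration):

    #include <stdbool.h>

    #define IOVA_FQ_SIZE 256    /* assumed for illustration */

    /* Full when one more push would collide with head */
    static bool ring_full(unsigned head, unsigned tail)
    {
        return ((tail + 1) % IOVA_FQ_SIZE) == head;
    }

    /* Occupancy; equals IOVA_FQ_SIZE - 1 exactly when ring_full() holds */
    static unsigned ring_used(unsigned head, unsigned tail)
    {
        return (tail + IOVA_FQ_SIZE - head) % IOVA_FQ_SIZE;
    }

head == tail is the empty state, which is why one slot has to be sacrificed to tell full apart from empty.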
static inline unsigned fq_ring_add(struct iova_fq *fq)
{
    unsigned idx = fq->tail;

+   assert_spin_locked(&fq->lock);
+
    fq->tail = (idx + 1) % IOVA_FQ_SIZE;

    return idx;
}
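fq_ring_add() only reserves a slot index; the caller fills the entry afterwards, which is why slot reservation must sit in the same critical section as the entry initialization — exactly what the new assertion enforces. A condensed producer sketch (the wrapper name and its locals are illustrative only; queue_iova() below is the real caller, and it handles a full ring by flushing rather than skipping):

    /* Hypothetical producer wrapper; name and locals are illustrative only */
    static void fq_add_one(struct iova_domain *iovad, struct iova_fq *fq,
                           unsigned long data)
    {
        unsigned long flags;

        spin_lock_irqsave(&fq->lock, flags);

        if (!fq_full(fq)) {
            unsigned idx = fq_ring_add(fq);

            fq->entries[idx].data    = data;
            fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
        }

        spin_unlock_irqrestore(&fq->lock, flags);
    }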
static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
{
    u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
    unsigned idx;

+   assert_spin_locked(&fq->lock);
+
    fq_ring_for_each(idx, fq) {
        if (fq->entries[idx].counter >= counter)
            break;
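The hunk ends inside fq_ring_for_each(); the remainder of the loop (not shown here) frees each completed entry and advances head. Judging by the fields this patch reads, that drain step looks roughly like the following sketch, with iovad->entry_dtor and free_iova_fast() assumed from the rest of the file:

    /* Sketch of the per-entry drain; entry_dtor and free_iova_fast()
     * are assumptions based on the surrounding file */
    if (iovad->entry_dtor)
        iovad->entry_dtor(fq->entries[idx].data);

    free_iova_fast(iovad, fq->entries[idx].iova_pfn,
                   fq->entries[idx].pages);

    fq->head = (fq->head + 1) % IOVA_FQ_SIZE;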
void queue_iova(struct iova_domain *iovad,
                unsigned long pfn, unsigned long pages,
                unsigned long data)
{
    struct iova_fq *fq = get_cpu_ptr(iovad->fq);
+   unsigned long flags;
    unsigned idx;

+   spin_lock_irqsave(&fq->lock, flags);
+
    /*
     * First remove all entries from the flush queue that have already been
     * flushed out on another CPU. This makes the fq_full() check below less
     * likely to be true.
     */
    fq_ring_free(iovad, fq);

    if (fq_full(fq)) {
        iovad->flush_cb(iovad);
        fq_ring_free(iovad, fq);
    }

    idx = fq_ring_add(fq);

    fq->entries[idx].iova_pfn = pfn;
    fq->entries[idx].pages    = pages;
    fq->entries[idx].data     = data;
    fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);
+   spin_unlock_irqrestore(&fq->lock, flags);
+
    put_cpu_ptr(iovad->fq);
}
EXPORT_SYMBOL_GPL(queue_iova);
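get_cpu_ptr() already pins queue_iova() to its own CPU's queue, but that alone cannot exclude a consumer running on another CPU, such as a periodic flush timer draining all queues; that is what fq->lock (taken irqsave, since queueing can happen from IRQ context) protects against. A sketch of such a cross-CPU consumer — fq_flush_timeout() is illustrative and not part of this patch:

    /* Hypothetical cross-CPU drain, the case the new lock protects against;
     * fq_flush_timeout() is an assumption, not part of this patch */
    static void fq_flush_timeout(unsigned long data)
    {
        struct iova_domain *iovad = (struct iova_domain *)data;
        int cpu;

        /* Flush the IOTLB once, then reap completed entries on all CPUs */
        iovad->flush_cb(iovad);

        for_each_possible_cpu(cpu) {
            struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
            unsigned long flags;

            spin_lock_irqsave(&fq->lock, flags);
            fq_ring_free(iovad, fq);
            spin_unlock_irqrestore(&fq->lock, flags);
        }
    }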