From fb418dab8a4f01dde0c025d15145c589ec02796b Mon Sep 17 00:00:00 2001
From: Joerg Roedel <jroedel@suse.de>
Date: Thu, 10 Aug 2017 16:14:59 +0200
Subject: [PATCH] iommu/iova: Add flush counters to Flush-Queue implementation

There are two counters:

	* fq_flush_start_cnt  - Increased when a TLB flush is started.
	* fq_flush_finish_cnt - Increased when a TLB flush is finished.

The fq_flush_start_cnt is assigned to every Flush-Queue entry on its
creation. When freeing entries from the Flush-Queue, the value in the
entry is compared to the fq_flush_finish_cnt. The entry can only be
freed when its value is less than the value of fq_flush_finish_cnt.

The reason for these counters is to take advantage of IOMMU TLB flushes
that happened on other CPUs. These flushes already invalidated the TLB
for Flush-Queue entries on other CPUs, so those entries can be freed
without flushing the TLB again.

This makes it less likely that the Flush-Queue is full and saves IOMMU
TLB flushes.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
 drivers/iommu/iova.c | 27 ++++++++++++++++++++++++---
 include/linux/iova.h |  8 ++++++++
 2 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index e5c9a7ae6088..47b144e417ad 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -75,6 +75,9 @@ int init_iova_flush_queue(struct iova_domain *iovad,
 {
 	int cpu;
 
+	atomic64_set(&iovad->fq_flush_start_cnt,  0);
+	atomic64_set(&iovad->fq_flush_finish_cnt, 0);
+
 	iovad->fq = alloc_percpu(struct iova_fq);
 	if (!iovad->fq)
 		return -ENOMEM;
@@ -482,20 +485,30 @@ static inline unsigned fq_ring_add(struct iova_fq *fq)
 
 static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
 {
+	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
 	unsigned idx;
 
 	fq_ring_for_each(idx, fq) {
 
+		if (fq->entries[idx].counter >= counter)
+			break;
+
 		if (iovad->entry_dtor)
 			iovad->entry_dtor(fq->entries[idx].data);
 
 		free_iova_fast(iovad,
 			       fq->entries[idx].iova_pfn,
 			       fq->entries[idx].pages);
+
+		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
 	}
+}
 
-	fq->head = 0;
-	fq->tail = 0;
+static void iova_domain_flush(struct iova_domain *iovad)
+{
+	atomic64_inc(&iovad->fq_flush_start_cnt);
+	iovad->flush_cb(iovad);
+	atomic64_inc(&iovad->fq_flush_finish_cnt);
 }
 
 static void fq_destroy_all_entries(struct iova_domain *iovad)
@@ -526,8 +539,15 @@ void queue_iova(struct iova_domain *iovad,
 	struct iova_fq *fq = get_cpu_ptr(iovad->fq);
 	unsigned idx;
 
+	/*
+	 * First remove all entries from the flush queue that have already been
+	 * flushed out on another CPU. This makes the fq_full() check below less
+	 * likely to be true.
+	 */
+	fq_ring_free(iovad, fq);
+
 	if (fq_full(fq)) {
-		iovad->flush_cb(iovad);
+		iova_domain_flush(iovad);
 		fq_ring_free(iovad, fq);
 	}
 
@@ -536,6 +556,7 @@ void queue_iova(struct iova_domain *iovad,
 	fq->entries[idx].iova_pfn = pfn;
 	fq->entries[idx].pages    = pages;
 	fq->entries[idx].data     = data;
+	fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);
 
 	put_cpu_ptr(iovad->fq);
 }
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 1ae85248ec50..985b8008999e 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/rbtree.h>
+#include <linux/atomic.h>
 #include <linux/dma-mapping.h>
 
 /* iova structure */
@@ -52,6 +53,7 @@ struct iova_fq_entry {
 	unsigned long iova_pfn;
 	unsigned long pages;
 	unsigned long data;
+	u64 counter; /* Flush counter when this entry was added */
 };
 
 /* Per-CPU Flush Queue structure */
@@ -77,6 +79,12 @@ struct iova_domain {
 					   iova entry */
 
 	struct iova_fq __percpu *fq;	/* Flush Queue */
+
+	atomic64_t	fq_flush_start_cnt;	/* Number of TLB flushes that
+						   have been started */
+
+	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
+						   have been finished */
 };
 
 static inline unsigned long iova_size(struct iova *iova)
-- 
2.20.1
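
For readers studying the mechanism, below is a minimal single-threaded
userspace sketch of the counter scheme, not kernel code. C11 atomics
stand in for atomic64_t; IOVA_FQ_SIZE, flush_tlb() and the printf()
tracing are placeholders invented for the sketch, and the per-CPU
queues, locking and entry destructor are deliberately left out.

/*
 * Userspace model of the flush-counter scheme in the patch above --
 * a sketch under the assumptions stated in the note, not kernel code.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define IOVA_FQ_SIZE 4	/* toy ring size, not the kernel's value */

struct fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	uint64_t counter;	/* fq_flush_start_cnt when entry was queued */
};

struct fq {
	struct fq_entry entries[IOVA_FQ_SIZE];
	unsigned head, tail;
};

static atomic_uint_fast64_t fq_flush_start_cnt;
static atomic_uint_fast64_t fq_flush_finish_cnt;

/* Placeholder for iovad->flush_cb(): pretend to flush the IOMMU TLB. */
static void flush_tlb(void)
{
	printf("  IOMMU TLB flush\n");
}

/* Mirrors iova_domain_flush(): bump start counter, flush, bump finish. */
static void domain_flush(void)
{
	atomic_fetch_add(&fq_flush_start_cnt, 1);
	flush_tlb();
	atomic_fetch_add(&fq_flush_finish_cnt, 1);
}

/* Mirrors fq_ring_free(): free entries whose flush has already finished. */
static void fq_ring_free(struct fq *fq)
{
	uint64_t counter = atomic_load(&fq_flush_finish_cnt);

	while (fq->head != fq->tail) {
		struct fq_entry *e = &fq->entries[fq->head];

		/* Queued at or after the last finished flush? Keep it. */
		if (e->counter >= counter)
			break;

		printf("  free pfn %lu (%lu pages)\n", e->iova_pfn, e->pages);
		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

static int fq_full(struct fq *fq)
{
	return (fq->tail + 1) % IOVA_FQ_SIZE == fq->head;
}

/* Mirrors queue_iova(): reap first, flush only if the ring is still full. */
static void queue_iova(struct fq *fq, unsigned long pfn, unsigned long pages)
{
	fq_ring_free(fq);	/* reuse flushes that already happened */

	if (fq_full(fq)) {
		domain_flush();
		fq_ring_free(fq);
	}

	fq->entries[fq->tail] = (struct fq_entry){
		.iova_pfn = pfn,
		.pages    = pages,
		.counter  = atomic_load(&fq_flush_start_cnt),
	};
	fq->tail = (fq->tail + 1) % IOVA_FQ_SIZE;
}

int main(void)
{
	struct fq fq = { .head = 0, .tail = 0 };

	for (unsigned long pfn = 0; pfn < 8; pfn++) {
		printf("queue pfn %lu\n", pfn);
		queue_iova(&fq, pfn, 1);
	}
	return 0;
}

Because this model is single-threaded, every flush is still triggered
by the local ring filling up; the saving the counters buy only shows up
with several CPUs, where a domain_flush() on one CPU lets a later
queue_iova() on another reap its stale entries in the initial
fq_ring_free() call and skip the fq_full() path entirely.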