iommu/io-pgtable: Indicate granule for TLB maintenance
author Robin Murphy <robin.murphy@arm.com>
Mon, 7 Dec 2015 18:18:53 +0000 (18:18 +0000)
committer Will Deacon <will.deacon@arm.com>
Thu, 17 Dec 2015 12:05:34 +0000 (12:05 +0000)
IOMMU hardware with range-based TLB maintenance commands can work
happily with the iova and size arguments passed via the tlb_add_flush
callback, but for IOMMUs which require separate commands per entry in
the range, it is not straightforward to infer the necessary granularity
when it comes to issuing the actual commands.
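For illustration only (hypothetical driver and helper names, not part of
this patch), a driver for such per-entry hardware can now simply step
through the range at the supplied granularity; without it, a 2MB range
could equally be a single 2MB block entry or 512 individual 4K pages,
which iova and size alone cannot distinguish:

	static void example_tlb_inv_range_nosync(unsigned long iova, size_t size,
						 size_t granule, bool leaf,
						 void *cookie)
	{
		/* One invalidation command per TLB entry in the range */
		while (size) {
			example_issue_tlbi_va(iova, leaf, cookie);
			iova += granule;
			size -= granule;
		}
	}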

Add an additional argument indicating the granularity for the benefit
of drivers needing to know, and update the ARM LPAE code appropriately
(for non-leaf invalidations we currently just assume the worst-case
page granularity rather than walking the table to check).
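To summarise the two conventions visible in the io-pgtable-arm.c hunks
below: a leaf invalidation covers exactly the entry being unmapped, so
the entry size is the granule, whereas invalidating a freed table walk
covers entries of unknown size, so the page granule is the safe
assumption:

	/* Leaf entry: the unmapped entry itself is the granule */
	tlb->tlb_add_flush(iova, size, size, true, cookie);

	/* Partial walk: entry sizes unknown; assume worst-case pages */
	tlb->tlb_add_flush(iova, size, ARM_LPAE_GRANULE(data), false, cookie);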

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/io-pgtable.h
drivers/iommu/ipmmu-vmsa.c

diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 4c5ef4e5da98ebf9dfda7d63b5895fb6d8206592..735ad2c58dd84f75bbb62ea8e841b231a60de7a9 100644
@@ -1341,7 +1341,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 }
 
 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
-                                         bool leaf, void *cookie)
+                                         size_t granule, bool leaf, void *cookie)
 {
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 6ed169bcb39d5c6441920506adb59d15c10000d2..7e04bf5640ae4996debc95319765445906371135 100644
@@ -582,7 +582,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 }
 
 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
-                                         bool leaf, void *cookie)
+                                         size_t granule, bool leaf, void *cookie)
 {
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 366a354c689d5ccfd06a981eb4b398966b2951da..7a5c772f7be2ad6786507e47671d5e4e06b72a24 100644
        ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))             \
          * (d)->bits_per_level) + (d)->pg_shift)
 
+#define ARM_LPAE_GRANULE(d)            (1UL << (d)->pg_shift)
+
 #define ARM_LPAE_PAGES_PER_PGD(d)                                      \
-       DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)
+       DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))
 
 /*
  * Calculate the index at level l used to map virtual address a using the
 /* IOPTE accessors */
 #define iopte_deref(pte,d)                                     \
        (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)    \
-       & ~((1ULL << (d)->pg_shift) - 1)))
+       & ~(ARM_LPAE_GRANULE(d) - 1ULL)))
 
 #define iopte_type(pte,l)                                      \
        (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
@@ -326,7 +328,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
        /* Grab a pointer to the next level */
        pte = *ptep;
        if (!pte) {
-               cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
+               cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
                                               GFP_ATOMIC, cfg);
                if (!cptep)
                        return -ENOMEM;
@@ -412,7 +414,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
        if (lvl == ARM_LPAE_START_LVL(data))
                table_size = data->pgd_size;
        else
-               table_size = 1UL << data->pg_shift;
+               table_size = ARM_LPAE_GRANULE(data);
 
        start = ptep;
        end = (void *)ptep + table_size;
@@ -473,7 +475,7 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 
        __arm_lpae_set_pte(ptep, table, cfg);
        iova &= ~(blk_size - 1);
-       cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
+       cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
        return size;
 }
 
@@ -501,12 +503,13 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 
                if (!iopte_leaf(pte, lvl)) {
                        /* Also flush any partial walks */
-                       tlb->tlb_add_flush(iova, size, false, cookie);
+                       tlb->tlb_add_flush(iova, size, ARM_LPAE_GRANULE(data),
+                                          false, cookie);
                        tlb->tlb_sync(cookie);
                        ptep = iopte_deref(pte, data);
                        __arm_lpae_free_pgtable(data, lvl + 1, ptep);
                } else {
-                       tlb->tlb_add_flush(iova, size, true, cookie);
+                       tlb->tlb_add_flush(iova, size, size, true, cookie);
                }
 
                return size;
@@ -572,7 +575,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
        return 0;
 
 found_translation:
-       iova &= ((1 << data->pg_shift) - 1);
+       iova &= (ARM_LPAE_GRANULE(data) - 1);
        return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
 }
 
@@ -670,7 +673,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
              (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
              (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
 
-       switch (1 << data->pg_shift) {
+       switch (ARM_LPAE_GRANULE(data)) {
        case SZ_4K:
                reg |= ARM_LPAE_TCR_TG0_4K;
                break;
@@ -771,7 +774,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 
        sl = ARM_LPAE_START_LVL(data);
 
-       switch (1 << data->pg_shift) {
+       switch (ARM_LPAE_GRANULE(data)) {
        case SZ_4K:
                reg |= ARM_LPAE_TCR_TG0_4K;
                sl++; /* SL0 format is different for 4K granule size */
@@ -891,8 +894,8 @@ static void dummy_tlb_flush_all(void *cookie)
        WARN_ON(cookie != cfg_cookie);
 }
 
-static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
-                               void *cookie)
+static void dummy_tlb_add_flush(unsigned long iova, size_t size,
+                               size_t granule, bool leaf, void *cookie)
 {
        WARN_ON(cookie != cfg_cookie);
        WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index ac9e2341a633ed82420d3d06bd24cec23240b86b..2e18469afe3c0c424ebd076b52aa284d49a0a5b8 100644
@@ -26,8 +26,8 @@ enum io_pgtable_fmt {
  */
 struct iommu_gather_ops {
        void (*tlb_flush_all)(void *cookie);
-       void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
-                             void *cookie);
+       void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
+                             bool leaf, void *cookie);
        void (*tlb_sync)(void *cookie);
 };
 
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 8cf605fa9946013642b2a88f500beb285cc55cfc..5b1166d407c464e4dfce2999c977aa4a58fad0f7 100644
@@ -277,8 +277,8 @@ static void ipmmu_tlb_flush_all(void *cookie)
        ipmmu_tlb_invalidate(domain);
 }
 
-static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
-                               void *cookie)
+static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
+                               size_t granule, bool leaf, void *cookie)
 {
        /* The hardware doesn't support selective TLB flush. */
 }