iommu/arm-smmu: Workaround for ThunderX erratum #27704
author    Tirumalesh Chalamarla <tchalamarla@caviumnetworks.com>
Fri, 4 Mar 2016 21:56:09 +0000 (13:56 -0800)
committer Will Deacon <will.deacon@arm.com>
Tue, 3 May 2016 17:23:01 +0000 (18:23 +0100)
Due to erratum #27704, the CN88xx SMMUv2 implementation supports only a
single ASID and VMID numberspace that is shared across all SMMU instances.

This patch ensures that ASIDs and VMIDs are unique across all SMMU
instances on affected Cavium systems.
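As an aside (not part of the patch), the workaround boils down to a
fetch-and-add carve-out: a single global counter hands each SMMU instance a
private, contiguous block of IDs, one per context bank, and every context
bank then uses its block base plus its index as the ASID/VMID. Below is a
minimal standalone C sketch of that scheme with made-up names
(smmu_instance, reserve_ids, asid); the driver itself achieves the same
result with atomic_add_return() followed by a subtraction, which is
equivalent to a fetch-and-add.

/* Standalone sketch of the ID-space carving; illustrative names only. */
#include <stdatomic.h>
#include <stdio.h>

/* Plays the role of cavium_smmu_context_count in the driver. */
static atomic_int id_space = 0;

struct smmu_instance {
	int num_context_banks;
	int id_base;		/* plays the role of smmu->cavium_id_base */
};

static void reserve_ids(struct smmu_instance *smmu)
{
	/* Atomically claim one ID per context bank; remember where the block starts. */
	smmu->id_base = atomic_fetch_add(&id_space, smmu->num_context_banks);
}

/* Mirrors ARM_SMMU_CB_ASID(smmu, cfg): block base plus context bank index. */
static int asid(const struct smmu_instance *smmu, int cbndx)
{
	return smmu->id_base + cbndx;
}

int main(void)
{
	struct smmu_instance smmu0 = { .num_context_banks = 128 };
	struct smmu_instance smmu1 = { .num_context_banks = 128 };

	reserve_ids(&smmu0);
	reserve_ids(&smmu1);

	/* Context bank 0 maps to ASID 0 on the first SMMU and ASID 128 on the second. */
	printf("smmu0 cb0 -> %d, smmu1 cb0 -> %d\n", asid(&smmu0, 0), asid(&smmu1, 0));
	return 0;
}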

Signed-off-by: Tirumalesh Chalamarla <tchalamarla@caviumnetworks.com>
Signed-off-by: Akula Geethasowjanya <Geethasowjanya.Akula@caviumnetworks.com>
[will: commit message, comments and formatting]
Signed-off-by: Will Deacon <will.deacon@arm.com>
Documentation/arm64/silicon-errata.txt
Documentation/devicetree/bindings/iommu/arm,smmu.txt
drivers/iommu/arm-smmu.c

diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index ba4b6acfc5457bc30d5adc519ed0869b4433dfcd..806f91cdd45df3373b35571282d43ccd5108ff3a 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -57,3 +57,4 @@ stable kernels.
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
 | Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
 | Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456    |
+| Cavium         | ThunderX SMMUv2 | #27704          | N/A                     |
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 718074501fcbc97b5c7b5c33cab2736e433f5220..19fe6f2c83f61c07dd6e5f32ced3921f1804e8a4 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -16,6 +16,7 @@ conditions.
                         "arm,mmu-400"
                         "arm,mmu-401"
                         "arm,mmu-500"
+                        "cavium,smmu-v2"
 
                   depending on the particular implementation and/or the
                   version of the architecture implemented.
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 25e884a75f6baf2aa0e1dc2dc00183df5486cb65..e933679a326648fdd86800398e8f48dbed2edc0d 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -334,6 +334,8 @@ struct arm_smmu_device {
 
        struct list_head                list;
        struct rb_root                  masters;
+
+       u32                             cavium_id_base; /* Specific to Cavium */
 };
 
 struct arm_smmu_cfg {
@@ -343,8 +345,8 @@ struct arm_smmu_cfg {
 };
 #define INVALID_IRPTNDX                        0xff
 
-#define ARM_SMMU_CB_ASID(cfg)          ((cfg)->cbndx)
-#define ARM_SMMU_CB_VMID(cfg)          ((cfg)->cbndx + 1)
+#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
+#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
 
 enum arm_smmu_domain_stage {
        ARM_SMMU_DOMAIN_S1 = 0,
@@ -372,6 +374,8 @@ struct arm_smmu_option_prop {
        const char *prop;
 };
 
+static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
+
 static struct arm_smmu_option_prop arm_smmu_options[] = {
        { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
        { 0, NULL},
@@ -583,11 +587,11 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 
        if (stage1) {
                base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
-               writel_relaxed(ARM_SMMU_CB_ASID(cfg),
+               writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
                               base + ARM_SMMU_CB_S1_TLBIASID);
        } else {
                base = ARM_SMMU_GR0(smmu);
-               writel_relaxed(ARM_SMMU_CB_VMID(cfg),
+               writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
                               base + ARM_SMMU_GR0_TLBIVMID);
        }
 
@@ -609,7 +613,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 
                if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
                        iova &= ~12UL;
-                       iova |= ARM_SMMU_CB_ASID(cfg);
+                       iova |= ARM_SMMU_CB_ASID(smmu, cfg);
                        do {
                                writel_relaxed(iova, reg);
                                iova += granule;
@@ -617,7 +621,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 #ifdef CONFIG_64BIT
                } else {
                        iova >>= 12;
-                       iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
+                       iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
                        do {
                                writeq_relaxed(iova, reg);
                                iova += granule >> 12;
@@ -637,7 +641,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 #endif
        } else {
                reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
-               writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
+               writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
        }
 }
 
@@ -746,7 +750,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 #endif
                /* 16-bit VMIDs live in CBA2R */
                if (smmu->features & ARM_SMMU_FEAT_VMID16)
-                       reg |= ARM_SMMU_CB_VMID(cfg) << CBA2R_VMID_SHIFT;
+                       reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
 
                writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
        }
@@ -765,7 +769,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
                        (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
        } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
                /* 8-bit VMIDs live in CBAR */
-               reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
+               reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
        }
        writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
 
@@ -773,11 +777,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
        if (stage1) {
                reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
 
-               reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
+               reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
                smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
 
                reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
-               reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
+               reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
                smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
        } else {
                reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
@@ -1737,6 +1741,7 @@ static const struct of_device_id arm_smmu_of_match[] = {
        { .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
        { .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
        { .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
+       { .compatible = "cavium,smmu-v2", .data = (void *)ARM_SMMU_V2 },
        { },
 };
 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
@@ -1847,6 +1852,18 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
                }
        }
 
+       /*
+        * Cavium CN88xx erratum #27704.
+        * Ensure ASID and VMID allocation is unique across all SMMUs in
+        * the system.
+        */
+       if (of_device_is_compatible(dev->of_node, "cavium,smmu-v2")) {
+               smmu->cavium_id_base =
+                       atomic_add_return(smmu->num_context_banks,
+                                         &cavium_smmu_context_count);
+               smmu->cavium_id_base -= smmu->num_context_banks;
+       }
+
        INIT_LIST_HEAD(&smmu->list);
        spin_lock(&arm_smmu_devices_lock);
        list_add(&smmu->list, &arm_smmu_devices);
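
A note on the resulting numbering (figures purely illustrative): on a system
with two affected SMMUs of, say, 128 context banks each, the first instance
to probe ends up with cavium_id_base 0 and the second with 128, so
ARM_SMMU_CB_ASID() yields 0-127 on one instance and 128-255 on the other,
and ARM_SMMU_CB_VMID() likewise cannot collide. On non-Cavium SMMUs
cavium_id_base remains 0, so the macros reduce to the previous cbndx-based
values and behaviour there is unchanged.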