}
/* Wait for any pending TLB invalidations to complete */
-static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
+				void __iomem *sync, void __iomem *status)
{
-	int count = 0;
-	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-
-	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
-	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
-	       & sTLBGSTATUS_GSACTIVE) {
-		cpu_relax();
-		if (++count == TLB_LOOP_TIMEOUT) {
-			dev_err_ratelimited(smmu->dev,
-			"TLB sync timed out -- SMMU may be deadlocked\n");
-			return;
+	unsigned int spin_cnt, delay;
+
+	writel_relaxed(0, sync);
+	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
+		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
+			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
+				return;
+			cpu_relax();
 		}
-		udelay(1);
+		udelay(delay);
 	}
+	dev_err_ratelimited(smmu->dev,
+			    "TLB sync timed out -- SMMU may be deadlocked\n");
 }
 
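The rewrite above swaps a fixed once-per-microsecond poll for a two-level scheme: busy-spin TLB_SPIN_COUNT times, then udelay() for an exponentially doubling interval. Assuming the driver's usual TLB_LOOP_TIMEOUT of 1000000, the delays sum to 1 + 2 + ... + 524288 µs, so just over one second elapses before the ratelimited error fires, while a sync that completes quickly never pays a udelay() at all. A minimal standalone sketch of the idiom, with hypothetical names (the real driver pulls in linux/io.h and linux/delay.h):

/*
 * Spin-then-backoff MMIO poll: a sketch of the pattern used by
 * __arm_smmu_tlb_sync() above. poll_until_clear(), SPIN_COUNT and
 * LOOP_TIMEOUT are hypothetical names, not part of the driver.
 */
#define SPIN_COUNT	10		/* busy-poll reads per step */
#define LOOP_TIMEOUT	1000000		/* cap on the backoff delay */

static bool poll_until_clear(void __iomem *status, u32 mask)
{
	unsigned int spin_cnt, delay;

	for (delay = 1; delay < LOOP_TIMEOUT; delay *= 2) {
		/* Fast path: the bit usually clears within a few reads */
		for (spin_cnt = SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			if (!(readl_relaxed(status) & mask))
				return true;
			cpu_relax();
		}
		/* Slow path: back off exponentially to spare the CPU */
		udelay(delay);
	}
	return false;	/* caller reports the timeout */
}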
-static void arm_smmu_tlb_sync(void *cookie)
+static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
+{
+	void __iomem *base = ARM_SMMU_GR0(smmu);
+
+	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
+			    base + ARM_SMMU_GR0_sTLBGSTATUS);
+}
+
+static void arm_smmu_tlb_sync_context(void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
-	__arm_smmu_tlb_sync(smmu_domain->smmu);
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+
+	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
+			    base + ARM_SMMU_CB_TLBSTATUS);
+}
+
+static void arm_smmu_tlb_sync_vmid(void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+
+	arm_smmu_tlb_sync_global(smmu_domain->smmu);
 }
 
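The point of the three-way split is that stage-1 domains (and stage-2 on SMMUv2) can sync on the per-context-bank TLBSYNC/TLBSTATUS registers, while SMMUv1 stage-2 domains only have the global sync. Elsewhere in this patch, beyond this excerpt, the callbacks are presumably wired into per-stage io-pgtable gather ops; a sketch under that assumption, with the table names assumed:

/*
 * Sketch of the per-stage TLB op tables this split enables (names
 * assumed, not visible in this hunk). Stage 1 and SMMUv2 stage 2
 * sync on the context bank; SMMUv1 stage 2 falls back to the
 * VMID-wide global sync.
 */
static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync_vmid,
};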
-static void arm_smmu_tlb_inv_context(void *cookie)
+static void arm_smmu_tlb_inv_context_s1(void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
-	void __iomem *base;
+	void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
 
-	if (stage1) {
-		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
-		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
-			       base + ARM_SMMU_CB_S1_TLBIASID);
-	} else {
-		base = ARM_SMMU_GR0(smmu);
-		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
-			       base + ARM_SMMU_GR0_TLBIVMID);
-	}
+	writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+	arm_smmu_tlb_sync_context(cookie);
+}
 
-	__arm_smmu_tlb_sync(smmu);
+static void arm_smmu_tlb_inv_context_s2(void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	void __iomem *base = ARM_SMMU_GR0(smmu);
+
+	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+	arm_smmu_tlb_sync_global(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
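Both hunks lean on the SMMU's paged register layout: the global register space sits at the base of the region, and each context bank is its own page indexed by cbndx. For reference, the address helpers used above are defined roughly as below in this generation of the driver; treat the exact definitions as an assumption, since they sit outside this excerpt:

/* Global register space 0 sits at the base of the SMMU region */
#define ARM_SMMU_GR0(smmu)	((smmu)->base)
/* Context banks are pgshift-sized pages indexed from cb_base */
#define ARM_SMMU_CB(smmu, n)	((smmu)->cb_base + ((n) << (smmu)->pgshift))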
arm_smmu_device_reset(smmu);
arm_smmu_test_smr_masks(smmu);
 
-	/* Oh, for a proper bus abstraction */
-	if (!iommu_present(&platform_bus_type))
-		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
-#ifdef CONFIG_ARM_AMBA
-	if (!iommu_present(&amba_bustype))
-		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
-#endif
-#ifdef CONFIG_PCI
-	if (!iommu_present(&pci_bus_type)) {
-		pci_request_acs();
-		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
-	}
-#endif
+	/*
+	 * For ACPI and generic DT bindings, an SMMU will be probed before
+	 * any device which might need it, so we want the bus ops in place
+	 * ready to handle default domain setup as soon as any SMMU exists.
+	 */
+	if (!using_legacy_binding)
+		arm_smmu_bus_init();
+
+	return 0;
+}
+
+/*
+ * With the legacy DT binding in play, though, we have no guarantees about
+ * probe order, but then we're also not doing default domains, so we can
+ * delay setting bus ops until we're sure every possible SMMU is ready,
+ * and that way ensure that no add_device() calls get missed.
+ */
+static int arm_smmu_legacy_bus_init(void)
+{
+	if (using_legacy_binding)
+		arm_smmu_bus_init();
 	return 0;
 }
+device_initcall_sync(arm_smmu_legacy_bus_init);
 
static int arm_smmu_device_remove(struct platform_device *pdev)
{
struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
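The removed registration block does not vanish: it evidently becomes the body of the new arm_smmu_bus_init() helper, called directly for ACPI and generic DT probing and deferred to device_initcall_sync() for the legacy binding. Since device_initcall_sync() runs only after all plain device-level initcalls, every SMMU that is going to probe has done so by then, which is exactly the guarantee the comment asks for. The helper itself is outside this excerpt; reconstructed from the removed lines:

static void arm_smmu_bus_init(void)
{
	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
}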