const struct iommu_ops *ops = NULL;
int ret = -ENODEV;
struct fwnode_handle *iort_fwnode;
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+ /*
+ * If we already translated the fwspec, there is
+ * nothing left to do; return the iommu_ops.
+ */
+ if (fwspec && fwspec->ops)
+ return fwspec->ops;
if (node) {
iort_fwnode = iort_get_fwnode(node);
if (!iort_fwnode)
	return NULL;
ops = iommu_ops_from_fwnode(iort_fwnode);
+ /*
+ * If the ops look-up fails, it means that either
+ * the SMMU driver has not been probed yet or that
+ * the SMMU driver is not built into the kernel.
+ * If the driver is built-in, defer the IOMMU
+ * configuration; otherwise just abort it.
+ */
if (!ops)
- return NULL;
+ return iort_iommu_driver_enabled(node->type) ?
+ ERR_PTR(-EPROBE_DEFER) : NULL;
ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
}
while (parent) {
ops = iort_iommu_xlate(dev, parent, streamid);
+ if (IS_ERR_OR_NULL(ops))
+ return ops;
parent = iort_node_get_id(node, &streamid,
IORT_IOMMU_TYPE, i++);
}
}
+ /*
+ * If we have reason to believe the IOMMU driver missed the initial
+ * add_device callback for dev, replay it to get things in order.
+ */
+ if (!IS_ERR_OR_NULL(ops) && ops->add_device &&
+ dev->bus && !dev->iommu_group) {
+ int err = ops->add_device(dev);
+
+ if (err)
+ ops = ERR_PTR(err);
+ }
+
return ops;
}
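The defer-or-abort choice above turns on whether the matching SMMU driver is compiled into the kernel image. A minimal sketch of what iort_iommu_driver_enabled() can look like, assuming the IORT node-type constants and the ARM SMMU Kconfig symbols; the helper's exact shape in the patch may differ:

#include <linux/acpi.h>
#include <linux/kconfig.h>

/*
 * Sketch: map an IORT node type to the corresponding SMMU driver's
 * Kconfig symbol. IS_BUILTIN() is true only for drivers compiled
 * into the kernel image; only then is deferring the probe useful,
 * since a built-in driver is guaranteed to register eventually.
 */
static bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_BUILTIN(CONFIG_ARM_SMMU);
	default:
		return false;	/* not an SMMU, no driver to wait for */
	}
}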
* @dev: The pointer to the device
* @attr: device dma attributes
*/
-void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
+int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
{
const struct iommu_ops *iommu;
+ u64 size;
iort_set_dma_mask(dev);
iommu = iort_iommu_configure(dev);
+ if (IS_ERR(iommu))
+ return PTR_ERR(iommu);
+ size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
/*
* Assume dma valid range starts at 0 and covers the whole
* coherent_dma_mask.
*/
- arch_setup_dma_ops(dev, 0, dev->coherent_dma_mask + 1, iommu,
- attr == DEV_DMA_COHERENT);
+ arch_setup_dma_ops(dev, 0, size, iommu, attr == DEV_DMA_COHERENT);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(acpi_dma_configure);
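The max() above exists because coherent_dma_mask + 1 overflows to 0 when the mask is all ones. A small standalone C program demonstrating the arithmetic (a userspace sketch using a plain ternary, not the kernel's type-checked max() macro):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t masks[] = { 0xffffffffULL, ~0ULL };

	for (int i = 0; i < 2; i++) {
		uint64_t mask = masks[i];
		uint64_t plus1 = mask + 1;	/* wraps to 0 for ~0ULL */
		uint64_t size = plus1 > mask ? plus1 : mask;

		/* mask=0xffffffff   -> size=0x100000000 (full 4 GiB range)
		 * mask=0xffff...ff  -> size=mask, avoiding a zero size   */
		printf("mask=%#llx size=%#llx\n",
		       (unsigned long long)mask,
		       (unsigned long long)size);
	}
	return 0;
}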
} else if (has_acpi_companion(dma_dev)) {
attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode));
if (attr != DEV_DMA_NOT_SUPPORTED)
- acpi_dma_configure(dev, attr);
+ ret = acpi_dma_configure(dev, attr);
}
if (bridge)
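Returning ret from the PCI path lets -EPROBE_DEFER travel back to the driver core, which requeues the device for a later probe attempt once the SMMU driver has registered. A hypothetical caller sketch; example_dma_configure is illustrative, not a function from this patch:

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/property.h>

/*
 * Hypothetical sketch: forward the acpi_dma_configure() result to
 * the probe path. A -EPROBE_DEFER return makes the driver core put
 * the device on the deferred-probe list and retry later. Note that
 * the !CONFIG_ACPI stub returns 0, so this compiles and succeeds on
 * non-ACPI builds with no #ifdef needed.
 */
static int example_dma_configure(struct device *dev, enum dev_dma_attr attr)
{
	int ret = acpi_dma_configure(dev, attr);

	if (ret == -EPROBE_DEFER)
		dev_dbg(dev, "IOMMU driver not ready, deferring probe\n");

	return ret;
}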
bool acpi_dma_supported(struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
-void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
+int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
void acpi_dma_deconfigure(struct device *dev);
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
return DEV_DMA_NOT_SUPPORTED;
}
-static inline void acpi_dma_configure(struct device *dev,
- enum dev_dma_attr attr) { }
+static inline int acpi_dma_configure(struct device *dev,
+ enum dev_dma_attr attr)
+{
+ return 0;
+}
static inline void acpi_dma_deconfigure(struct device *dev) { }