*/
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
-	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+#ifdef CONFIG_X86
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

+#ifdef CONFIG_INTEL_IOMMU
out_fixup:
+#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/* No coherent page pool */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

+#else /* CONFIG_X86 */
+	dev_priv->map_mode = vmw_dma_map_populate;
+#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}
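For reference, the modes that the names[] table indexes come from the vmw_dma_map_mode enum in vmwgfx_drv.h; the enumerator names are visible above, while the comments here are paraphrased rather than quoted from the header:

enum vmw_dma_map_mode {
	vmw_dma_phys,		/* Use physical TTM page addresses */
	vmw_dma_alloc_coherent,	/* Use TTM coherent pages */
	vmw_dma_map_populate,	/* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,	/* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

The new #else branch pins non-x86 builds to vmw_dma_map_populate because the probing it bypasses (intel_iommu_enabled, swiotlb_nr_tbl() and the dma_map_ops details) is x86-specific.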
		page_virtual = kmap_atomic(page);
-		desc_dma = page_virtual[desc_per_page].ppn << PAGE_SHIFT;
+		desc_dma = (dma_addr_t)
+			le32_to_cpu(page_virtual[desc_per_page].ppn) <<
+			PAGE_SHIFT;
		kunmap_atomic(page_virtual);
		__free_page(page);
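The hunk above appears to come from vmw_gmr_free_descriptors(), which reads back the chained page number before freeing each descriptor page; the hunk below is the matching write side in vmw_gmr_build_descriptors(), which links the pages in reverse so that the last descriptor slot of each page holds the page number of its successor. A standalone sketch of this chaining scheme follows the second hunk.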
	desc_dma = 0;
	list_for_each_entry_reverse(page, desc_pages, lru) {
		page_virtual = kmap_atomic(page);
-		page_virtual[desc_per_page].ppn = desc_dma >> PAGE_SHIFT;
+		page_virtual[desc_per_page].ppn = cpu_to_le32
+			(desc_dma >> PAGE_SHIFT);
		kunmap_atomic(page_virtual);
		desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
					DMA_TO_DEVICE);
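Taken together, the two conversions keep the on-page PPN little-endian regardless of host byte order, since the device defines the descriptor layout. Below is a minimal, self-contained userspace sketch of the reverse-linking scheme and the byte-order handling; it is not driver code, and the names struct desc, sk_cpu_to_le32(), sk_le32_to_cpu() and fake_dma[] are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SK_PAGE_SIZE	4096u
#define SK_PAGE_SHIFT	12

struct desc {			/* stand-in for svga_guest_mem_descriptor */
	uint32_t ppn;		/* little-endian on the "device" side */
	uint32_t num_pages;
};

/* Portable stand-ins for cpu_to_le32()/le32_to_cpu(). */
static uint32_t sk_cpu_to_le32(uint32_t v)
{
	uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff, (v >> 16) & 0xff,
			 v >> 24 };
	uint32_t r;

	memcpy(&r, b, sizeof(r));
	return r;
}

static uint32_t sk_le32_to_cpu(uint32_t v)
{
	uint8_t b[4];

	memcpy(b, &v, sizeof(v));
	return b[0] | (uint32_t)b[1] << 8 | (uint32_t)b[2] << 16 |
	       (uint32_t)b[3] << 24;
}

int main(void)
{
	enum { NPAGES = 3 };
	/* Pretend the DMA API returned these page-aligned bus addresses. */
	static const uint64_t fake_dma[NPAGES] =
		{ 0x100000, 0x200000, 0x300000 };
	struct desc *pages[NPAGES];
	size_t last = SK_PAGE_SIZE / sizeof(struct desc) - 1;
	uint64_t next_dma = 0;	/* a PPN of 0 terminates the chain */
	int i;

	/*
	 * Link in reverse, like list_for_each_entry_reverse() above:
	 * each page's last slot stores its successor's page number.
	 */
	for (i = NPAGES - 1; i >= 0; i--) {
		pages[i] = calloc(1, SK_PAGE_SIZE);
		pages[i][last].ppn =
			sk_cpu_to_le32((uint32_t)(next_dma >> SK_PAGE_SHIFT));
		next_dma = fake_dma[i];
	}

	/*
	 * Walk the chain forward, recovering each successor's address
	 * the way the free path above does.
	 */
	for (i = 0; i < NPAGES; i++) {
		uint64_t dma = (uint64_t)sk_le32_to_cpu(pages[i][last].ppn)
			       << SK_PAGE_SHIFT;

		printf("page %d -> next descriptor page at 0x%llx\n",
		       i, (unsigned long long)dma);
		free(pages[i]);
	}
	return 0;
}

On a little-endian host the two helpers are identity functions, so the sketch (and the patch) only changes behavior on big-endian builds, where the unconverted stores would have written byte-swapped page numbers into the descriptor chain.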