#define AGP_GENERIC_SIZES_ENTRIES 11
extern struct aper_size_info_16 agp3_generic_sizes[];
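+/*
+ * Composite helpers: kernel virtual address <-> GART (bus) address.
+ * phys_to_gart()/gart_to_phys() are supplied per-arch (the identity on
+ * most ports), so any address translation lives in one place.
+ */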
+#define virt_to_gart(x) (phys_to_gart(virt_to_phys(x)))
+#define gart_to_virt(x) (phys_to_virt(gart_to_phys(x)))
extern int agp_off;
extern int agp_try_unsupported_boot;
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
- virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN ));
+ virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN ));
return addr;
}
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
- virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN));
+ virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN));
agp_generic_destroy_page(addr);
}
SetPageReserved(virt_to_page(page_map->real));
global_cache_flush();
- page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
+ page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
PAGE_SIZE);
if (page_map->remapped == NULL) {
ClearPageReserved(virt_to_page(page_map->real));
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
- agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+ agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
/* Get the address for the gart region.
 * This is a bus address even on the alpha, b/c it's
 * used to program the agp master not the cpu
 */
/* Calculate the agp offset */
for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
- writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
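+ /* Each page-directory slot takes the GART/bus address of a GATT
+  * page, with the low bit set to flag the entry as valid. */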
+ writel(virt_to_gart(amd_irongate_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
static int amd_8151_configure(void)
{
- unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
+ unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real);
/* Configure AGP regs in each x86-64 host bridge. */
for_each_nb() {
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
- release_mem_region(virt_to_phys(bridge->gatt_table_real),
+ release_mem_region(virt_to_gart(bridge->gatt_table_real),
amd64_aperture_sizes[bridge->aperture_size_idx].size);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
SetPageReserved(virt_to_page(page_map->real));
err = map_page_into_agp(virt_to_page(page_map->real));
- page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
+ page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
PAGE_SIZE);
if (page_map->remapped == NULL || err) {
ClearPageReserved(virt_to_page(page_map->real));
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
- agp_bridge->gatt_bus_addr = virt_to_bus(page_dir.real);
+ agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
/* Write out the size register */
current_size = A_SIZE_LVL2(agp_bridge->current_size);
/* Calculate the agp offset */
for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
- writel(virt_to_bus(ati_generic_private.gatt_pages[i]->real) | 1,
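+ /* Same layout as the AMD page directory above: GART/bus address of
+  * the GATT page, low bit marks the entry valid. */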
+ writel(virt_to_gart(ati_generic_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
return -ENOMEM;
}
- bridge->scratch_page_real = virt_to_phys(addr);
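+ /* Record the scratch page's GART/bus address; mask_memory() below
+  * folds it into the driver's GATT entry format. */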
+ bridge->scratch_page_real = virt_to_gart(addr);
bridge->scratch_page =
bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0);
}
err_out:
if (bridge->driver->needs_scratch_page)
bridge->driver->agp_destroy_page(
- phys_to_virt(bridge->scratch_page_real));
+ gart_to_virt(bridge->scratch_page_real));
if (got_gatt)
bridge->driver->free_gatt_table(bridge);
if (got_keylist) {
if (bridge->driver->agp_destroy_page &&
bridge->driver->needs_scratch_page)
bridge->driver->agp_destroy_page(
- phys_to_virt(bridge->scratch_page_real));
+ gart_to_virt(bridge->scratch_page_real));
}
/* When we remove the global variable agp_bridge from all drivers
efficeon_private.l1_table[index] = page;
- value = __pa(page) | pati | present | index;
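+ /* An ATTPAGE entry packs the L1 page's GART address together with
+  * the page attribute, present bit and table index. */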
+ value = virt_to_gart(page) | pati | present | index;
pci_write_config_dword(agp_bridge->dev,
EFFICEON_ATTPAGE, value);
}
if (curr->page_count != 0) {
for (i = 0; i < curr->page_count; i++) {
- curr->bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[i]));
+ curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
}
}
agp_free_key(curr->key);
agp_free_memory(new);
return NULL;
}
- new->memory[i] = virt_to_phys(addr);
+ new->memory[i] = virt_to_gart(addr);
new->page_count++;
}
new->bridge = bridge;
break;
}
- table = (char *) __get_free_pages(GFP_KERNEL,
- page_order);
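+ /* alloc_gatt_pages() defaults to __get_free_pages() but lets an
+  * arch substitute its own GATT-suitable allocator. */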
+ table = alloc_gatt_pages(page_order);
if (table == NULL) {
i++;
size = ((struct aper_size_info_fixed *) temp)->size;
page_order = ((struct aper_size_info_fixed *) temp)->page_order;
num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
- table = (char *) __get_free_pages(GFP_KERNEL, page_order);
+ table = alloc_gatt_pages(page_order);
}
if (table == NULL)
agp_gatt_table = (void *)table;
bridge->driver->cache_flush();
- bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
+ bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
(PAGE_SIZE * (1 << page_order)));
bridge->driver->cache_flush();
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
ClearPageReserved(page);
- free_pages((unsigned long) table, page_order);
+ free_gatt_pages(table, page_order);
return -ENOMEM;
}
- bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
+ bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);
/* AK: bogus, should encode addresses > 4GB */
for (i = 0; i < num_entries; i++) {
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
ClearPageReserved(page);
- free_pages((unsigned long) bridge->gatt_table_real, page_order);
+ free_gatt_pages(bridge->gatt_table_real, page_order);
agp_gatt_table = NULL;
bridge->gatt_table = NULL;
hp->gart_size = HP_ZX1_GART_SIZE;
hp->gatt_entries = hp->gart_size / hp->io_page_size;
- hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
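+ /* PDIR_BASE holds the pdir's GART/bus address; convert it back to
+  * a kernel virtual pointer before indexing the table below. */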
+ hp->io_pdir = gart_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
if (hp->io_pdir_owner) {
- writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
+ writel(virt_to_gart(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
readl(hp->ioc_regs+HP_ZX1_TCNFG);
}
memset(lp->alloced_map, 0, map_size);
- lp->paddr = virt_to_phys(lpage);
+ lp->paddr = virt_to_gart(lpage);
lp->refcount = 0;
atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
return 0;
kfree(lp->alloced_map);
lp->alloced_map = NULL;
- free_pages((unsigned long) phys_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
+ free_pages((unsigned long) gart_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
}
if (new == NULL)
return NULL;
- new->memory[0] = virt_to_phys(addr);
+ new->memory[0] = virt_to_gart(addr);
if (pg_count == 4) {
/* kludge to get 4 physical pages for ARGB cursor */
new->memory[1] = new->memory[0] + PAGE_SIZE;
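+ /* Deriving the remaining entries by offset relies on contiguous
+  * physical pages staying contiguous in GART space, which holds
+  * while phys_to_gart() is the identity. */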
agp_free_key(curr->key);
if(curr->type == AGP_PHYS_MEMORY) {
if (curr->page_count == 4)
- i8xx_destroy_pages(phys_to_virt(curr->memory[0]));
+ i8xx_destroy_pages(gart_to_virt(curr->memory[0]));
else
agp_bridge->driver->agp_destroy_page(
- phys_to_virt(curr->memory[0]));
+ gart_to_virt(curr->memory[0]));
vfree(curr->memory);
}
kfree(curr);
}
SetPageReserved(virt_to_page(page_map->real));
global_cache_flush();
- page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
+ page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
PAGE_SIZE);
if (page_map->remapped == NULL) {
ClearPageReserved(virt_to_page(page_map->real));
/* Create a fake scratch directory */
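+ /* Every page-directory slot initially points at this scratch
+  * directory, whose entries all point at the scratch page, so stray
+  * GART accesses land on harmless memory until real GATT pages are
+  * installed. */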
for(i = 0; i < 1024; i++) {
writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
- writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
+ writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
}
retval = serverworks_create_gatt_pages(value->num_entries / 1024);
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
- agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+ agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
/* Get the address for the gart region.
 * This is a bus address even on the alpha, b/c it's
 * used to program the agp master not the cpu
 */
/* Calculate the agp offset */
for(i = 0; i < value->num_entries / 1024; i++)
- writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
+ writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
return 0;
}
bridge->gatt_table_real = (u32 *) table;
bridge->gatt_table = (u32 *)table;
- bridge->gatt_bus_addr = virt_to_phys(table);
+ bridge->gatt_bus_addr = virt_to_gart(table);
for (i = 0; i < num_entries; i++)
bridge->gatt_table[i] = 0;
#define flush_agp_mappings()
#define flush_agp_cache() mb()
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
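+/* The GART on this port sees raw physical addresses, so both
+ * conversions are the identity; a port that translates addresses
+ * (e.g. behind an IOMMU) would do so here. */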
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
#endif
worth it. Would need a page for it. */
#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
#endif
#define flush_agp_mappings() /* nothing */
#define flush_agp_cache() mb()
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
#endif /* _ASM_IA64_AGP_H */
#define flush_agp_mappings()
#define flush_agp_cache() mb()
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
#endif
#define flush_agp_mappings()
#define flush_agp_cache() mb()
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
#endif
#define flush_agp_mappings()
#define flush_agp_cache() mb()
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
#endif
worth it. Would need a page for it. */
#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
#endif