* could end up in wrong address. */
}
-int r100_pci_gart_enable(struct radeon_device *rdev)
+int r100_pci_gart_init(struct radeon_device *rdev)
{
- uint32_t tmp;
int r;
+ if (rdev->gart.table.ram.ptr) {
+ WARN(1, "R100 PCI GART already initialized.\n");
+ return 0;
+ }
/* Initialize common gart structure */
r = radeon_gart_init(rdev);
- if (r) {
+ if (r)
return r;
- }
- if (rdev->gart.table.ram.ptr == NULL) {
- rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
- r = radeon_gart_table_ram_alloc(rdev);
- if (r) {
- return r;
- }
- }
+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+ rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart_set_page = &r100_pci_gart_set_page;
+ return radeon_gart_table_ram_alloc(rdev);
+}
+
+int r100_pci_gart_enable(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
/* discard memory request outside of configured range */
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32(RADEON_AIC_CNTL, tmp);
return 0;
}
-int r100_gart_enable(struct radeon_device *rdev)
+void r100_pci_gart_fini(struct radeon_device *rdev)
{
- if (rdev->flags & RADEON_IS_AGP) {
- r100_pci_gart_disable(rdev);
- return 0;
- }
- return r100_pci_gart_enable(rdev);
+ r100_pci_gart_disable(rdev);
+ radeon_gart_table_ram_free(rdev);
+ radeon_gart_fini(rdev);
}
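This hunk completes the r100 split: the old all-in-one enable path becomes a
four-stage lifecycle. A sketch of the intended call order (annotation only,
not part of the patch):

    /* GART lifecycle after this patch (sketch):
     *   driver init:      r100_pci_gart_init()    - alloc table once, hook
     *                                               tlb_flush/set_page
     *   startup / resume: r100_pci_gart_enable()  - program the hardware
     *   suspend / reset:  r100_pci_gart_disable() - stop translation
     *   driver teardown:  r100_pci_gart_fini()    - disable, free table,
     *                                               radeon_gart_fini()
     */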
void r100_mc_fini(struct radeon_device *rdev)
{
- r100_pci_gart_disable(rdev);
- radeon_gart_table_ram_free(rdev);
- radeon_gart_fini(rdev);
}
int r100_rb2d_reset(struct radeon_device *rdev);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
int r100_pci_gart_enable(struct radeon_device *rdev);
-void r100_pci_gart_disable(struct radeon_device *rdev);
void r100_mc_setup(struct radeon_device *rdev);
void r100_mc_disable_clients(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
mb();
}
-int rv370_pcie_gart_enable(struct radeon_device *rdev)
+int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+ void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+
+ if (i < 0 || i >= rdev->gart.num_gpu_pages)
+ return -EINVAL;
+ addr = (lower_32_bits(addr) >> 8) |
+ ((upper_32_bits(addr) & 0xff) << 24) |
+ 0xc;
+ /* on x86 we want this to be CPU endian; on powerpc
+ * without HW swappers, it'll get swapped on the way
+ * into VRAM - so no need for cpu_to_le32 on VRAM tables */
+ writel(addr, ptr + (i * 4));
+ return 0;
+}
+
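An aside on the entry format used by the set_page function above: a 40-bit
bus address is packed into a 32-bit table entry. A stand-alone sketch
(hypothetical helper name; the 0xc flag bits are copied from the code, their
individual meaning is not spelled out by this patch):

    #include <stdint.h>

    /* Mirrors the packing in rv370_pcie_gart_set_page(): low 32 bits
     * shifted right by 8, address bits 32..39 moved into entry bits
     * 24..31, then OR'ed with the 0xc flags. For 4KB-aligned pages the
     * shifted value has its low four bits clear, which is where the
     * flags live. */
    static uint32_t rv370_pack_gart_entry(uint64_t addr)
    {
            return (uint32_t)((addr & 0xffffffffULL) >> 8) |
                   ((uint32_t)((addr >> 32) & 0xff) << 24) |
                   0xc;
    }

    /* e.g. rv370_pack_gart_entry(0x123456000ULL) == 0x0123456c */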
+int rv370_pcie_gart_init(struct radeon_device *rdev)
{
- uint32_t table_addr;
- uint32_t tmp;
int r;
+ if (rdev->gart.table.vram.robj) {
+ WARN(1, "RV370 PCIE GART already initialized.\n");
+ return 0;
+ }
/* Initialize common gart structure */
r = radeon_gart_init(rdev);
- if (r) {
+ if (r)
return r;
- }
r = rv370_debugfs_pcie_gart_info_init(rdev);
- if (r) {
+ if (r)
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
- }
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
- r = radeon_gart_table_vram_alloc(rdev);
- if (r) {
- return r;
+ rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
+ return radeon_gart_table_vram_alloc(rdev);
+}
+
+int rv370_pcie_gart_enable(struct radeon_device *rdev)
+{
+ uint32_t table_addr;
+ uint32_t tmp;
+ int r;
+
+ if (rdev->gart.table.vram.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
}
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
/* discard memory request outside of configured range */
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
}
}
-int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
-{
- void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
-
- if (i < 0 || i > rdev->gart.num_gpu_pages) {
- return -EINVAL;
- }
- addr = (lower_32_bits(addr) >> 8) |
- ((upper_32_bits(addr) & 0xff) << 24) |
- 0xc;
- /* on x86 we want this to be CPU endian, on powerpc
- * on powerpc without HW swappers, it'll get swapped on way
- * into VRAM - so no need for cpu_to_le32 on VRAM tables */
- writel(addr, ((void __iomem *)ptr) + (i * 4));
- return 0;
-}
-
-int r300_gart_enable(struct radeon_device *rdev)
+void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
-#if __OS_HAS_AGP
- if (rdev->flags & RADEON_IS_AGP) {
- if (rdev->family > CHIP_RV350) {
- rv370_pcie_gart_disable(rdev);
- } else {
- r100_pci_gart_disable(rdev);
- }
- return 0;
- }
-#endif
- if (rdev->flags & RADEON_IS_PCIE) {
- rdev->asic->gart_disable = &rv370_pcie_gart_disable;
- rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
- rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
- return rv370_pcie_gart_enable(rdev);
- }
- if (rdev->flags & RADEON_IS_PCI) {
- rdev->asic->gart_disable = &r100_pci_gart_disable;
- rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
- rdev->asic->gart_set_page = &r100_pci_gart_set_page;
- return r100_pci_gart_enable(rdev);
- }
- return r100_pci_gart_enable(rdev);
+ rv370_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
}
-
/*
* MC
*/
void r300_mc_fini(struct radeon_device *rdev)
{
- if (rdev->flags & RADEON_IS_PCIE) {
- rv370_pcie_gart_disable(rdev);
- radeon_gart_table_vram_free(rdev);
- } else {
- r100_pci_gart_disable(rdev);
- radeon_gart_table_ram_free(rdev);
- }
- radeon_gart_fini(rdev);
}
/* Stops all mc clients */
r100_mc_stop(rdev, &save);
- /* Shutdown PCI/PCIE GART */
- radeon_gart_disable(rdev);
if (rdev->flags & RADEON_IS_AGP) {
WREG32(R_00014C_MC_AGP_LOCATION,
S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
{
int r;
+ /* Make sure GART is disabled */
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_disable(rdev);
/* Resume clock before doing reset */
r420_clock_resume(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
r300_mc_program(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
- r = radeon_gart_enable(rdev);
- if (r) {
- dev_err(rdev->dev, "failled initializing GART (%d).\n", r);
- return r;
+ if (rdev->flags & RADEON_IS_PCIE) {
+ r = rv370_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+ if (rdev->flags & RADEON_IS_PCI) {
+ r = r100_pci_gart_enable(rdev);
+ if (r)
+ return r;
}
r420_pipes_init(rdev);
/* Enable IRQ */
r100_cp_disable(rdev);
r100_wb_disable(rdev);
r100_irq_disable(rdev);
- radeon_gart_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_disable(rdev);
return 0;
}
r100_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
- if (rdev->flags & RADEON_IS_PCIE) {
- rv370_pcie_gart_disable(rdev);
- radeon_gart_table_vram_free(rdev);
- } else {
- r100_pci_gart_disable(rdev);
- radeon_gart_table_ram_free(rdev);
- }
- radeon_gart_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_fini(rdev);
radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
if (r) {
return r;
}
+ if (rdev->flags & RADEON_IS_PCIE) {
+ r = rv370_pcie_gart_init(rdev);
+ if (r)
+ return r;
+ }
+ if (rdev->flags & RADEON_IS_PCI) {
+ r = r100_pci_gart_init(rdev);
+ if (r)
+ return r;
+ }
r300_set_reg_safe(rdev);
r = r420_resume(rdev);
if (r) {
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
- if (rdev->flags & RADEON_IS_PCIE) {
- rv370_pcie_gart_disable(rdev);
- radeon_gart_table_vram_free(rdev);
- } else {
- r100_pci_gart_disable(rdev);
- radeon_gart_table_ram_free(rdev);
- }
- radeon_gart_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_fini(rdev);
radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
}
/* r520,rv530,rv560,rv570,r580 depends on : */
void r100_hdp_reset(struct radeon_device *rdev);
-int rv370_pcie_gart_enable(struct radeon_device *rdev);
-void rv370_pcie_gart_disable(struct radeon_device *rdev);
void r420_pipes_init(struct radeon_device *rdev);
void rs600_mc_disable_clients(struct radeon_device *rdev);
void rs600_disable_vga(struct radeon_device *rdev);
void r520_mc_fini(struct radeon_device *rdev)
{
- rv370_pcie_gart_disable(rdev);
- radeon_gart_table_vram_free(rdev);
- radeon_gart_fini(rdev);
}
}
}
-int r600_pcie_gart_enable(struct radeon_device *rdev)
+int r600_pcie_gart_init(struct radeon_device *rdev)
{
- u32 tmp;
- int r, i;
+ int r;
+ if (rdev->gart.table.vram.robj) {
+ WARN(1, "R600 PCIE GART already initialized.\n");
+ return 0;
+ }
/* Initialize common gart structure */
r = radeon_gart_init(rdev);
- if (r) {
+ if (r)
return r;
- }
rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
- r = radeon_gart_table_vram_alloc(rdev);
- if (r) {
- return r;
+ return radeon_gart_table_vram_alloc(rdev);
+}
+
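Worth noting while reading these init routines: the entry width differs by
family, which is why the table sizing differs (values collected from the
hunks in this patch):

    /* GART table sizing as set in this patch:
     *   r100 / rv370 / rs400 : num_gpu_pages * 4   (32-bit entries)
     *   rs600 / r600         : num_gpu_pages * 8   (64-bit entries)
     */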
+int r600_pcie_gart_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int r, i;
+
+ if (rdev->gart.table.vram.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
}
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
for (i = 0; i < rdev->gart.num_gpu_pages; i++)
r600_gart_clear_page(rdev, i);
/* Setup L2 cache */
u32 tmp;
int i;
- /* Clear ptes*/
- for (i = 0; i < rdev->gart.num_gpu_pages; i++)
- r600_gart_clear_page(rdev, i);
- r600_pcie_gart_tlb_flush(rdev);
/* Disable all tables */
for (i = 0; i < 7; i++)
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+ if (rdev->gart.table.vram.robj) {
+ radeon_object_kunmap(rdev->gart.table.vram.robj);
+ radeon_object_unpin(rdev->gart.table.vram.robj);
+ }
+}
+
+void r600_pcie_gart_fini(struct radeon_device *rdev)
+{
+ r600_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
}
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
+ r600_pcie_gart_disable(rdev);
return 0;
}
}
}
+ r = r600_pcie_gart_init(rdev);
+ if (r)
+ return r;
+
r = r600_resume(rdev);
if (r) {
if (rdev->flags & RADEON_IS_AGP) {
r600_blit_fini(rdev);
radeon_ring_fini(rdev);
- r600_pcie_gart_disable(rdev);
- radeon_gart_table_vram_free(rdev);
- radeon_gart_fini(rdev);
+ r600_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
void (*mc_fini)(struct radeon_device *rdev);
int (*wb_init)(struct radeon_device *rdev);
void (*wb_fini)(struct radeon_device *rdev);
+ int (*gart_init)(struct radeon_device *rdev);
+ void (*gart_fini)(struct radeon_device *rdev);
int (*gart_enable)(struct radeon_device *rdev);
void (*gart_disable)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
#define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev))
#define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev))
#define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev))
+#define radeon_gpu_gart_init(rdev) (rdev)->asic->gart_init((rdev))
+#define radeon_gpu_gart_fini(rdev) (rdev)->asic->gart_fini((rdev))
#define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev))
#define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
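With the two new hooks, bus-agnostic core code can drive GART bring-up
through the asic table. A hypothetical caller, for illustration only (the
macro names come from the hunk above, everything else is made up):

    int radeon_example_gart_startup(struct radeon_device *rdev)
    {
            int r;

            r = radeon_gpu_gart_init(rdev);  /* -> rdev->asic->gart_init() */
            if (r)
                    return r;
            return radeon_gart_enable(rdev); /* -> rdev->asic->gart_enable() */
    }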
/* Common functions */
+extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
extern int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
extern void r100_cp_fini(struct radeon_device *rdev);
extern void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+extern int r100_pci_gart_init(struct radeon_device *rdev);
+extern void r100_pci_gart_fini(struct radeon_device *rdev);
extern int r100_pci_gart_enable(struct radeon_device *rdev);
extern void r100_pci_gart_disable(struct radeon_device *rdev);
extern int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
extern void r300_set_reg_safe(struct radeon_device *rdev);
extern void r300_mc_program(struct radeon_device *rdev);
extern void r300_vram_info(struct radeon_device *rdev);
+extern int rv370_pcie_gart_init(struct radeon_device *rdev);
+extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
+extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
/* r420,r423,rv410 */
extern int r600_count_pipe_bits(uint32_t val);
extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
+extern int r600_pcie_gart_init(struct radeon_device *rdev);
extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern int r600_ib_test(struct radeon_device *rdev);
extern int r600_ring_test(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
int r100_wb_init(struct radeon_device *rdev);
void r100_wb_fini(struct radeon_device *rdev);
-int r100_gart_enable(struct radeon_device *rdev);
+int r100_pci_gart_init(struct radeon_device *rdev);
+void r100_pci_gart_fini(struct radeon_device *rdev);
+int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
.mc_fini = &r100_mc_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
- .gart_enable = &r100_gart_enable,
+ .gart_init = &r100_pci_gart_init,
+ .gart_fini = &r100_pci_gart_fini,
+ .gart_enable = &r100_pci_gart_enable,
.gart_disable = &r100_pci_gart_disable,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
int r300_cs_parse(struct radeon_cs_parser *p);
-int r300_gart_enable(struct radeon_device *rdev);
+int rv370_pcie_gart_init(struct radeon_device *rdev);
+void rv370_pcie_gart_fini(struct radeon_device *rdev);
+int rv370_pcie_gart_enable(struct radeon_device *rdev);
void rv370_pcie_gart_disable(struct radeon_device *rdev);
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
.mc_fini = &r300_mc_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
- .gart_enable = &r300_gart_enable,
+ .gart_init = &r100_pci_gart_init,
+ .gart_fini = &r100_pci_gart_fini,
+ .gart_enable = &r100_pci_gart_enable,
.gart_disable = &r100_pci_gart_disable,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.mc_fini = NULL,
.wb_init = NULL,
.wb_fini = NULL,
- .gart_enable = &r300_gart_enable,
- .gart_disable = &rv370_pcie_gart_disable,
+ .gart_enable = NULL,
+ .gart_disable = NULL,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_init = NULL,
void rs400_vram_info(struct radeon_device *rdev);
int rs400_mc_init(struct radeon_device *rdev);
void rs400_mc_fini(struct radeon_device *rdev);
+int rs400_gart_init(struct radeon_device *rdev);
+void rs400_gart_fini(struct radeon_device *rdev);
int rs400_gart_enable(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
.mc_fini = &rs400_mc_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
+ .gart_init = &rs400_gart_init,
+ .gart_fini = &rs400_gart_fini,
.gart_enable = &rs400_gart_enable,
.gart_disable = &rs400_gart_disable,
.gart_tlb_flush = &rs400_gart_tlb_flush,
int rs600_irq_set(struct radeon_device *rdev);
int rs600_irq_process(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int rs600_gart_init(struct radeon_device *rdev);
+void rs600_gart_fini(struct radeon_device *rdev);
int rs600_gart_enable(struct radeon_device *rdev);
void rs600_gart_disable(struct radeon_device *rdev);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
.mc_fini = &rs600_mc_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
+ .gart_init = &rs600_gart_init,
+ .gart_fini = &rs600_gart_fini,
.gart_enable = &rs600_gart_enable,
.gart_disable = &rs600_gart_disable,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.mc_fini = &rs690_mc_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
+ .gart_init = &rs400_gart_init,
+ .gart_fini = &rs400_gart_fini,
.gart_enable = &rs400_gart_enable,
.gart_disable = &rs400_gart_disable,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.mc_fini = &rv515_mc_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
- .gart_enable = &r300_gart_enable,
+ .gart_init = &rv370_pcie_gart_init,
+ .gart_fini = &rv370_pcie_gart_fini,
+ .gart_enable = &rv370_pcie_gart_enable,
.gart_disable = &rv370_pcie_gart_disable,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.mc_fini = &r520_mc_fini,
.wb_init = &r100_wb_init,
.wb_fini = &r100_wb_fini,
- .gart_enable = &r300_gart_enable,
+ .gart_init = &rv370_pcie_gart_init,
+ .gart_fini = &rv370_pcie_gart_fini,
+ .gart_enable = &rv370_pcie_gart_enable,
.gart_disable = &rv370_pcie_gart_disable,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
case CHIP_RV350:
case CHIP_RV380:
rdev->asic = &r300_asic;
+ if (rdev->flags & RADEON_IS_PCIE) {
+ rdev->asic->gart_init = &rv370_pcie_gart_init;
+ rdev->asic->gart_fini = &rv370_pcie_gart_fini;
+ rdev->asic->gart_enable = &rv370_pcie_gart_enable;
+ rdev->asic->gart_disable = &rv370_pcie_gart_disable;
+ rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
+ }
break;
case CHIP_R420:
case CHIP_R423:
rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
+ /* Set asic functions */
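+ /* (must run before the agpmode fallback below, which patches the
+ * gart_* callbacks in the asic table in place) */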
+ r = radeon_asic_init(rdev);
+ if (r) {
+ return r;
+ }
+
if (radeon_agpmode == -1) {
rdev->flags &= ~RADEON_IS_AGP;
if (rdev->family >= CHIP_RV515 ||
rdev->family == CHIP_R423) {
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
+ rdev->asic->gart_init = &rv370_pcie_gart_init;
+ rdev->asic->gart_fini = &rv370_pcie_gart_fini;
+ rdev->asic->gart_enable = &rv370_pcie_gart_enable;
+ rdev->asic->gart_disable = &rv370_pcie_gart_disable;
+ rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
+ rdev->asic->gart_init = &r100_pci_gart_init;
+ rdev->asic->gart_fini = &r100_pci_gart_fini;
+ rdev->asic->gart_enable = &r100_pci_gart_enable;
+ rdev->asic->gart_disable = &r100_pci_gart_disable;
+ rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart_set_page = &r100_pci_gart_set_page;
}
}
- /* Set asic functions */
- r = radeon_asic_init(rdev);
- if (r) {
- return r;
- }
-
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits (in theory)
if (r) {
return r;
}
+ r = radeon_gpu_gart_init(rdev);
+ if (r)
+ return r;
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r = radeon_gart_enable(rdev);
radeon_ib_pool_fini(rdev);
radeon_cp_fini(rdev);
radeon_wb_fini(rdev);
+ radeon_gpu_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_mc_fini(rdev);
#if __OS_HAS_AGP
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
- uint64_t gpu_addr;
int r;
if (rdev->gart.table.vram.robj == NULL) {
return r;
}
}
+ return 0;
+}
+
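+ /* Pin the already-allocated table object back into VRAM and kmap it;
+ * called on every GART enable, since the object may have been unpinned
+ * (and moved) while the GART was disabled. */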
+int radeon_gart_table_vram_pin(struct radeon_device *rdev)
+{
+ uint64_t gpu_addr;
+ int r;
+
r = radeon_object_pin(rdev->gart.table.vram.robj,
RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r) {
WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
}
-int rs400_gart_enable(struct radeon_device *rdev)
+int rs400_gart_init(struct radeon_device *rdev)
{
- uint32_t size_reg;
- uint32_t tmp;
int r;
+ if (rdev->gart.table.ram.ptr) {
+ WARN(1, "RS400 GART already initialized.\n");
+ return 0;
+ }
+ /* Check gart size */
+ switch (rdev->mc.gtt_size / (1024 * 1024)) {
+ case 32:
+ case 64:
+ case 128:
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ break;
+ default:
+ return -EINVAL;
+ }
/* Initialize common gart structure */
r = radeon_gart_init(rdev);
- if (r) {
+ if (r)
return r;
- }
- if (rs400_debugfs_pcie_gart_info_init(rdev)) {
+ if (rs400_debugfs_pcie_gart_info_init(rdev))
DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
- }
+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+ return radeon_gart_table_ram_alloc(rdev);
+}
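The size switch at the top of rs400_gart_init() admits only power-of-two GTT
sizes from 32MB to 2GB. An equivalent guard, for reference (a sketch;
is_power_of_2() is the stock helper from linux/log2.h):

    u32 mb = rdev->mc.gtt_size / (1024 * 1024);

    if (!is_power_of_2(mb) || mb < 32 || mb > 2048)
            return -EINVAL;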
+
+int rs400_gart_enable(struct radeon_device *rdev)
+{
+ uint32_t size_reg;
+ uint32_t tmp;
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
default:
return -EINVAL;
}
- if (rdev->gart.table.ram.ptr == NULL) {
- rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
- r = radeon_gart_table_ram_alloc(rdev);
- if (r) {
- return r;
- }
- }
/* It should be fine to program it to max value */
if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
}
+void rs400_gart_fini(struct radeon_device *rdev)
+{
+ rs400_gart_disable(rdev);
+ radeon_gart_table_ram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
uint32_t entry;
(void)RREG32(RADEON_HOST_PATH_CNTL);
WREG32(RADEON_HOST_PATH_CNTL, tmp);
(void)RREG32(RADEON_HOST_PATH_CNTL);
+
return 0;
}
void rs400_mc_fini(struct radeon_device *rdev)
{
- rs400_gart_disable(rdev);
- radeon_gart_table_ram_free(rdev);
- radeon_gart_fini(rdev);
}
tmp = RREG32_MC(RS600_MC_PT0_CNTL);
}
-int rs600_gart_enable(struct radeon_device *rdev)
+int rs600_gart_init(struct radeon_device *rdev)
{
- uint32_t tmp;
- int i;
int r;
+ if (rdev->gart.table.vram.robj) {
+ WARN(1, "RS600 GART already initialized.\n");
+ return 0;
+ }
/* Initialize common gart structure */
r = radeon_gart_init(rdev);
if (r) {
return r;
}
rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
- r = radeon_gart_table_vram_alloc(rdev);
- if (r) {
- return r;
+ return radeon_gart_table_vram_alloc(rdev);
+}
+
+int rs600_gart_enable(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ int r, i;
+
+ if (rdev->gart.table.vram.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for GART.\n");
+ return -EINVAL;
}
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
/* FIXME: setup default page */
WREG32_MC(RS600_MC_PT0_CNTL,
(RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
tmp = RREG32_MC(RS600_MC_CNTL1);
tmp &= ~RS600_ENABLE_PAGE_TABLES;
WREG32_MC(RS600_MC_CNTL1, tmp);
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ if (rdev->gart.table.vram.robj) {
+ radeon_object_kunmap(rdev->gart.table.vram.robj);
+ radeon_object_unpin(rdev->gart.table.vram.robj);
+ }
+}
+
+void rs600_gart_fini(struct radeon_device *rdev)
+{
+ rs600_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
}
#define R600_PTE_VALID (1 << 0)
void rs600_mc_fini(struct radeon_device *rdev)
{
- rs600_gart_disable(rdev);
- radeon_gart_table_vram_free(rdev);
- radeon_gart_fini(rdev);
}
void rs690_mc_fini(struct radeon_device *rdev)
{
- rs400_gart_disable(rdev);
- radeon_gart_table_ram_free(rdev);
- radeon_gart_fini(rdev);
}
int r100_rb2d_reset(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
-int rv370_pcie_gart_enable(struct radeon_device *rdev);
-void rv370_pcie_gart_disable(struct radeon_device *rdev);
void r420_pipes_init(struct radeon_device *rdev);
void rs600_mc_disable_clients(struct radeon_device *rdev);
void rs600_disable_vga(struct radeon_device *rdev);
void rv515_mc_fini(struct radeon_device *rdev)
{
- rv370_pcie_gart_disable(rdev);
- radeon_gart_table_vram_free(rdev);
- radeon_gart_fini(rdev);
}
u32 tmp;
int r, i;
- /* Initialize common gart structure */
- r = radeon_gart_init(rdev);
- if (r) {
- return r;
+ if (rdev->gart.table.vram.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
}
- rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
- r = radeon_gart_table_vram_alloc(rdev);
- if (r) {
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
return r;
- }
for (i = 0; i < rdev->gart.num_gpu_pages; i++)
r600_gart_clear_page(rdev, i);
/* Setup L2 cache */
u32 tmp;
int i;
- /* Clear ptes*/
- for (i = 0; i < rdev->gart.num_gpu_pages; i++)
- r600_gart_clear_page(rdev, i);
- r600_pcie_gart_tlb_flush(rdev);
/* Disable all tables */
for (i = 0; i < 7; i++)
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ if (rdev->gart.table.vram.robj) {
+ radeon_object_kunmap(rdev->gart.table.vram.robj);
+ radeon_object_unpin(rdev->gart.table.vram.robj);
+ }
+}
+
+void rv770_pcie_gart_fini(struct radeon_device *rdev)
+{
+ rv770_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
}
{
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
+ rv770_pcie_gart_disable(rdev);
return 0;
}
}
}
+ r = r600_pcie_gart_init(rdev);
+ if (r)
+ return r;
+
r = rv770_resume(rdev);
if (r) {
if (rdev->flags & RADEON_IS_AGP) {
{
r600_blit_fini(rdev);
radeon_ring_fini(rdev);
- rv770_pcie_gart_disable(rdev);
- radeon_gart_table_vram_free(rdev);
- radeon_gart_fini(rdev);
+ rv770_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);