{
if (ring->count_dw <= 0)
DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
- ring->ring[ring->wptr++] = v;
+ ring->ring[ring->wptr++ & ring->buf_mask] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
}
}
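A hedged numeric illustration of the split this hunk introduces (not part of the patch): buf_mask wraps the array index on every access, while ptr_mask only bounds wptr itself and becomes a no-op once it is all ones.

/* Illustrative only, for a 1024-DW ring (buf_mask == 0x3ff): */
uint64_t wptr = 1023;
uint32_t a = wptr++ & 0x3ff;	/* writes slot 1023 */
uint32_t b = wptr++ & 0x3ff;	/* writes slot 0: the buffer wrapped */
/* wptr is now 1025; with ptr_mask == ~0ULL it keeps counting, while
 * the legacy ptr_mask == 0x3ff would fold it back to 1. */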
amdgpu_ring_clear_ring(ring);
}
- ring->ptr_mask = (ring->ring_size / 4) - 1;
+ ring->buf_mask = (ring->ring_size / 4) - 1;
+ ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
+ 0xffffffffffffffff : ring->buf_mask;
+
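The derivation above, restated as a hedged standalone sketch (helper name is ours, assuming ring_size is a power-of-two byte count):

static uint64_t ring_ptr_mask_sketch(unsigned ring_size, bool support_64bit_ptrs)
{
	uint32_t buf_mask = (ring_size / 4) - 1;	/* e.g. 0x3ff for a 4 KB ring */

	/* 64-bit-capable rings let the pointer free-run; legacy rings
	 * wrap it together with the buffer index. */
	return support_64bit_ptrs ? 0xffffffffffffffffULL : buf_mask;
}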
ring->max_dw = max_dw;
if (amdgpu_debugfs_ring_init(adev, ring)) {
enum amdgpu_ring_type type;
uint32_t align_mask;
u32 nop;
+ bool support_64bit_ptrs;
/* ring read/write ptr handling */
- u32 (*get_rptr)(struct amdgpu_ring *ring);
- u32 (*get_wptr)(struct amdgpu_ring *ring);
+ u64 (*get_rptr)(struct amdgpu_ring *ring);
+ u64 (*get_wptr)(struct amdgpu_ring *ring);
void (*set_wptr)(struct amdgpu_ring *ring);
/* validating and patching of IBs */
int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
struct amdgpu_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr_offs;
- unsigned wptr;
- unsigned wptr_old;
+ u64 wptr;
+ u64 wptr_old;
unsigned ring_size;
unsigned max_dw;
int count_dw;
uint64_t gpu_addr;
- uint32_t ptr_mask;
+ uint64_t ptr_mask;
+ uint32_t buf_mask;
bool ready;
u32 idx;
u32 me;
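One payoff of the widened fields (a hedged example, not from the patch): with a free-running wptr, the distance to rptr is a plain subtraction, with no full-versus-empty ambiguity at the wrap point.

/* Illustrative, 1024-DW ring, both pointers free-running:
 * rptr == 0x3fe, wptr == 0x402  ->  wptr - rptr == 4 DWs pending,
 * even though (wptr & buf_mask) == 2 sits below (rptr & buf_mask). */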
*
* Get the current rptr from the hardware (CIK+).
*/
-static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
+static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
{
u32 rptr;
*
* Get the current wptr from the hardware (CIK+).
*/
-static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
+static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me],
+ (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}
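mmSDMA0_GFX_RB_WPTR stores a byte offset rather than a DW index, which is what the shift-and-mask above encodes; a hedged worked example:

/* Illustrative: ring->wptr == 0x12345 DWs
 * lower_32_bits(ring->wptr) << 2  == 0x48d14 bytes
 * (...) & 0x3fffc                 == 0x08d14: an 18-bit,
 * 4-byte-aligned byte offset, the form the register expects. */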
static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
u32 extra_bits = vm_id & 0xf;
/* IB packet must end on an 8 DW boundary */
- cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
+ cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
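The (12 - (wptr & 7)) % 8 padding can be checked by hand (a hedged example): it leaves wptr ≡ 4 (mod 8), so the 4-DW INDIRECT_BUFFER packet that follows ends exactly on an 8-DW boundary.

/* Illustrative: wptr & 7 == 6  ->  (12 - 6) % 8 == 6 NOPs inserted,
 * wptr % 8 becomes 4, and the 4 packet DWs land the end on a
 * multiple of 8. */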
WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
ring->wptr = 0;
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
/* enable DMA RB */
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
+ .support_64bit_ptrs = false,
.get_rptr = cik_sdma_ring_get_rptr,
.get_wptr = cik_sdma_ring_get_wptr,
.set_wptr = cik_sdma_ring_set_wptr,
return 0;
}
-static u32 gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
+static u64 gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
return ring->adev->wb.wb[ring->rptr_offs];
}
-static u32 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
+static u64 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
{
struct amdgpu_device *adev = ring->adev;
- WREG32(mmCP_RB0_WPTR, ring->wptr);
+ WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
(void)RREG32(mmCP_RB0_WPTR);
}
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->gfx.compute_ring[0]) {
- WREG32(mmCP_RB1_WPTR, ring->wptr);
+ WREG32(mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
(void)RREG32(mmCP_RB1_WPTR);
} else if (ring == &adev->gfx.compute_ring[1]) {
- WREG32(mmCP_RB2_WPTR, ring->wptr);
+ WREG32(mmCP_RB2_WPTR, lower_32_bits(ring->wptr));
(void)RREG32(mmCP_RB2_WPTR);
} else {
BUG();
.type = AMDGPU_RING_TYPE_GFX,
.align_mask = 0xff,
.nop = 0x80000000,
+ .support_64bit_ptrs = false,
.get_rptr = gfx_v6_0_ring_get_rptr,
.get_wptr = gfx_v6_0_ring_get_wptr,
.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
/* Initialize the ring buffer's read and write pointers */
WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
ring->wptr = 0;
- WREG32(mmCP_RB0_WPTR, ring->wptr);
+ WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
/* set the wb address whether it's enabled or not */
rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
return 0;
}
-static u32 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
+static u64 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
return ring->adev->wb.wb[ring->rptr_offs];
}
-static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
+static u64 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
{
struct amdgpu_device *adev = ring->adev;
- WREG32(mmCP_RB0_WPTR, ring->wptr);
+ WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
(void)RREG32(mmCP_RB0_WPTR);
}
-static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
+static u64 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
/* XXX check if swapping is necessary on BE */
return ring->adev->wb.wb[ring->wptr_offs];
struct amdgpu_device *adev = ring->adev;
/* XXX check if swapping is necessary on BE */
- adev->wb.wb[ring->wptr_offs] = ring->wptr;
- WDOORBELL32(ring->doorbell_index, ring->wptr);
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}
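For contrast, a hedged sketch of what a 64-bit-capable ring would do here; the function name is ours, and a WDOORBELL64-style macro mirroring WDOORBELL32 is assumed, since these pre-Vega paths only ever ring 32-bit doorbells:

static void ring_set_wptr_64_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* publish the full 64-bit wptr, then ring the doorbell with it */
	*(volatile u64 *)&adev->wb.wb[ring->wptr_offs] = ring->wptr;
	WDOORBELL64(ring->doorbell_index, ring->wptr);
}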
/**
/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
ring->wptr = 0;
- mqd->queue_state.cp_hqd_pq_wptr = ring->wptr;
+ mqd->queue_state.cp_hqd_pq_wptr = lower_32_bits(ring->wptr);
WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
mqd->queue_state.cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
.type = AMDGPU_RING_TYPE_GFX,
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
+ .support_64bit_ptrs = false,
.get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
.type = AMDGPU_RING_TYPE_COMPUTE,
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
+ .support_64bit_ptrs = false,
.get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute,
/* Initialize the ring buffer's read and write pointers */
WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
ring->wptr = 0;
- WREG32(mmCP_RB0_WPTR, ring->wptr);
+ WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
/* set the wb address whether it's enabled or not */
rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
ring->wptr = 0;
- mqd->cp_hqd_pq_wptr = ring->wptr;
+ mqd->cp_hqd_pq_wptr = lower_32_bits(ring->wptr);
WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
return 0;
}
-static u32 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
+static u64 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
{
return ring->adev->wb.wb[ring->rptr_offs];
}
-static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
+static u64 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
- adev->wb.wb[ring->wptr_offs] = ring->wptr;
- WDOORBELL32(ring->doorbell_index, ring->wptr);
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
- WREG32(mmCP_RB0_WPTR, ring->wptr);
+ WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
(void)RREG32(mmCP_RB0_WPTR);
}
}
}
}
-static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
+static u64 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
return ring->adev->wb.wb[ring->wptr_offs];
}
struct amdgpu_device *adev = ring->adev;
/* XXX check if swapping is necessary on BE */
- adev->wb.wb[ring->wptr_offs] = ring->wptr;
- WDOORBELL32(ring->doorbell_index, ring->wptr);
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}
static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
.type = AMDGPU_RING_TYPE_GFX,
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
+ .support_64bit_ptrs = false,
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
.type = AMDGPU_RING_TYPE_COMPUTE,
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
+ .support_64bit_ptrs = false,
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
.type = AMDGPU_RING_TYPE_KIQ,
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
+ .support_64bit_ptrs = false,
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
*
* Get the current rptr from the hardware (VI+).
*/
-static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
+static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
{
/* XXX check if swapping is necessary on BE */
return ring->adev->wb.wb[ring->rptr_offs] >> 2;
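The writeback slot holds the SDMA read pointer as a byte offset, hence the shift down to DW units; a hedged worked example:

/* Illustrative: wb slot reads 0x00000840 bytes -> rptr == 0x210 DWs. */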
*
* Get the current wptr from the hardware (VI+).
*/
-static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
+static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
struct amdgpu_device *adev = ring->adev;
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
}
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
u32 vmid = vm_id & 0xf;
/* IB packet must end on an 8 DW boundary */
- sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
+ sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
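Same boundary invariant as on CIK, different constants (hedged): the VI SDMA INDIRECT packet is 6 DWs, so the padding targets wptr ≡ 2 (mod 8).

/* Illustrative: wptr & 7 == 7  ->  (10 - 7) % 8 == 3 NOPs,
 * wptr % 8 becomes 2, and 2 + 6 packet DWs == 8. */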
WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
ring->wptr = 0;
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ .support_64bit_ptrs = false,
.get_rptr = sdma_v2_4_ring_get_rptr,
.get_wptr = sdma_v2_4_ring_get_wptr,
.set_wptr = sdma_v2_4_ring_set_wptr,
*
* Get the current rptr from the hardware (VI+).
*/
-static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
+static uint64_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
/* XXX check if swapping is necessary on BE */
return ring->adev->wb.wb[ring->rptr_offs] >> 2;
*
* Get the current wptr from the hardware (VI+).
*/
-static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
+static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 wptr;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
- adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
- WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr) << 2;
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
} else {
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
}
}
u32 vmid = vm_id & 0xf;
/* IB packet must end on an 8 DW boundary */
- sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
+ sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
ring->wptr = 0;
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ .support_64bit_ptrs = false,
.get_rptr = sdma_v3_0_ring_get_rptr,
.get_wptr = sdma_v3_0_ring_get_wptr,
.set_wptr = sdma_v3_0_ring_set_wptr,
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
-static uint32_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
+static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
return ring->adev->wb.wb[ring->rptr_offs>>2];
}
-static uint32_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
+static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+ WREG32(DMA_RB_WPTR + sdma_offsets[me],
+ (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
* Pad as necessary with NOPs.
*/
- while ((ring->wptr & 7) != 5)
+ while ((lower_32_bits(ring->wptr) & 7) != 5)
amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
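On SI the same rule appears as a fixed target remainder (hedged): the DMA IB packet is 3 DWs, so padding until wptr ≡ 5 (mod 8) makes the packet end on the boundary.

/* Illustrative: wptr & 7 == 1 -> four NOPs bring it to 5; 5 + 3 == 8. */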
WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
ring->wptr = 0;
- WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+ WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
ring->ready = true;
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
+ .support_64bit_ptrs = false,
.get_rptr = si_dma_ring_get_rptr,
.get_wptr = si_dma_ring_get_wptr,
.set_wptr = si_dma_ring_set_wptr,
*
* Returns the current hardware read pointer
*/
-static uint32_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
+static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
*
* Returns the current hardware write pointer
*/
-static uint32_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
+static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
{
struct amdgpu_device *adev = ring->adev;
- WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
+ WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
static int uvd_v4_2_early_init(void *handle)
WREG32(mmUVD_RBC_RB_RPTR, 0x0);
ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
- WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
+ WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
/* set the ring address */
WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.nop = PACKET0(mmUVD_NO_OP, 0),
+ .support_64bit_ptrs = false,
.get_rptr = uvd_v4_2_ring_get_rptr,
.get_wptr = uvd_v4_2_ring_get_wptr,
.set_wptr = uvd_v4_2_ring_set_wptr,
*
* Returns the current hardware read pointer
*/
-static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
+static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
*
* Returns the current hardware write pointer
*/
-static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
+static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
{
struct amdgpu_device *adev = ring->adev;
- WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
+ WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
static int uvd_v5_0_early_init(void *handle)
WREG32(mmUVD_RBC_RB_RPTR, 0);
ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
- WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
+ WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.nop = PACKET0(mmUVD_NO_OP, 0),
+ .support_64bit_ptrs = false,
.get_rptr = uvd_v5_0_ring_get_rptr,
.get_wptr = uvd_v5_0_ring_get_wptr,
.set_wptr = uvd_v5_0_ring_set_wptr,
*
* Returns the current hardware read pointer
*/
-static uint32_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
+static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
*
* Returns the current hardware write pointer
*/
-static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
+static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
{
struct amdgpu_device *adev = ring->adev;
- WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
+ WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
static int uvd_v6_0_early_init(void *handle)
WREG32(mmUVD_RBC_RB_RPTR, 0);
ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
- WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
+ WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.nop = PACKET0(mmUVD_NO_OP, 0),
+ .support_64bit_ptrs = false,
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.nop = PACKET0(mmUVD_NO_OP, 0),
+ .support_64bit_ptrs = false,
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
*
* Returns the current hardware read pointer
*/
-static uint32_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
+static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
*
* Returns the current hardware write pointer
*/
-static uint32_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
+static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->vce.ring[0])
- WREG32(mmVCE_RB_WPTR, ring->wptr);
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
else
- WREG32(mmVCE_RB_WPTR2, ring->wptr);
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
}
static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
vce_v2_0_mc_resume(adev);
ring = &adev->vce.ring[0];
- WREG32(mmVCE_RB_RPTR, ring->wptr);
- WREG32(mmVCE_RB_WPTR, ring->wptr);
+ WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
ring = &adev->vce.ring[1];
- WREG32(mmVCE_RB_RPTR2, ring->wptr);
- WREG32(mmVCE_RB_WPTR2, ring->wptr);
+ WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
.type = AMDGPU_RING_TYPE_VCE,
.align_mask = 0xf,
.nop = VCE_CMD_NO_OP,
+ .support_64bit_ptrs = false,
.get_rptr = vce_v2_0_ring_get_rptr,
.get_wptr = vce_v2_0_ring_get_wptr,
.set_wptr = vce_v2_0_ring_set_wptr,
*
* Returns the current hardware read pointer
*/
-static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
+static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
*
* Returns the current hardware write pointer
*/
-static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
+static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->vce.ring[0])
- WREG32(mmVCE_RB_WPTR, ring->wptr);
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
else if (ring == &adev->vce.ring[1])
- WREG32(mmVCE_RB_WPTR2, ring->wptr);
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
else
- WREG32(mmVCE_RB_WPTR3, ring->wptr);
+ WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
}
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
int idx, r;
ring = &adev->vce.ring[0];
- WREG32(mmVCE_RB_RPTR, ring->wptr);
- WREG32(mmVCE_RB_WPTR, ring->wptr);
+ WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
ring = &adev->vce.ring[1];
- WREG32(mmVCE_RB_RPTR2, ring->wptr);
- WREG32(mmVCE_RB_WPTR2, ring->wptr);
+ WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
ring = &adev->vce.ring[2];
- WREG32(mmVCE_RB_RPTR3, ring->wptr);
- WREG32(mmVCE_RB_WPTR3, ring->wptr);
+ WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
.type = AMDGPU_RING_TYPE_VCE,
.align_mask = 0xf,
.nop = VCE_CMD_NO_OP,
+ .support_64bit_ptrs = false,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
.type = AMDGPU_RING_TYPE_VCE,
.align_mask = 0xf,
.nop = VCE_CMD_NO_OP,
+ .support_64bit_ptrs = false,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,