nouveau_dma_pre_init(chan);
chan->user_put = 0x40;
chan->user_get = 0x44;
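+ /* NV50 and later track a 64-bit GET; the upper half is exposed in a separate register */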
+ if (dev_priv->card_type >= NV_50)
+ chan->user_get_hi = 0x60;
/* disable the fifo caches */
pfifo->reassign(dev, false);
seq_printf(m, "channel id : %d\n", chan->id);
seq_printf(m, "cpu fifo state:\n");
- seq_printf(m, " base: 0x%08x\n", chan->pushbuf_base);
+ seq_printf(m, " base: 0x%10llx\n", chan->pushbuf_base);
seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
* -EBUSY if timeout exceeded
*/
static inline int
-READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
+READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
- uint32_t val;
+ uint64_t val;
val = nvchan_rd32(chan, chan->user_get);
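+ /* channels with a GET high word report a full 64-bit offset */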
+ if (chan->user_get_hi)
+ val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;
/* reset counter as long as GET is still advancing, this is
* to avoid misdetecting a GPU lockup if the GPU happens to
static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
- uint32_t cnt = 0, prev_get = 0;
- int ret;
+ uint64_t prev_get = 0;
+ int ret, cnt = 0;
ret = nv50_dma_push_wait(chan, slots + 1);
if (unlikely(ret))
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
- uint32_t prev_get = 0, cnt = 0;
- int get;
+ uint64_t prev_get = 0;
+ int cnt = 0, get;
if (chan->dma.ib_max)
return nv50_dma_wait(chan, slots, size);
/* mapping of the regs controlling the fifo */
void __iomem *user;
uint32_t user_get;
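+ /* offset of the GET high word; 0 when the channel only has a 32-bit GET */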
+ uint32_t user_get_hi;
uint32_t user_put;
/* Fencing */
struct nouveau_gpuobj *pushbuf;
struct nouveau_bo *pushbuf_bo;
struct nouveau_vma pushbuf_vma;
- uint32_t pushbuf_base;
+ uint64_t pushbuf_base;
/* Notifier memory */
struct nouveau_bo *notifier_bo;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramfc = NULL;
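+ /* start address of the indirect buffer ring within the push buffer */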
+ uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
unsigned long flags;
int ret;
nv_wo32(ramfc, 0x7c, 0x30000001);
nv_wo32(ramfc, 0x78, 0x00000000);
nv_wo32(ramfc, 0x3c, 0x403f6078);
- nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4);
- nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);
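+ /* the IB address may exceed 32 bits: low word at 0x50, high bits packed into 0x54 alongside the ring size order */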
+ nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
+ nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
+ drm_order(chan->dma.ib_max + 1) << 16);
if (dev_priv->chipset != 0x50) {
nv_wo32(chan->ramin, 0, chan->id);