}
/* PGD pointer */
- nv_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
- nv_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
- nv_wo32(chan, 0x0208, 0xffffffff);
- nv_wo32(chan, 0x020c, 0x000000ff);
+ nvkm_kmap(chan);
+ nvkm_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
+ nvkm_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
+ nvkm_wo32(chan, 0x0208, 0xffffffff);
+ nvkm_wo32(chan, 0x020c, 0x000000ff);
/* PGT[0] pointer */
- nv_wo32(chan, 0x1000, 0x00000000);
- nv_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8);
+ nvkm_wo32(chan, 0x1000, 0x00000000);
+ nvkm_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8);
/* identity-map the whole "channel" into its own vm */
for (i = 0; i < chan->size / 4096; i++) {
u64 addr = ((chan->addr + (i * 4096)) >> 8) | 1;
- nv_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
- nv_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
+ nvkm_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
+ nvkm_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
}
/* context pointer (virt) */
- nv_wo32(chan, 0x0210, 0x00080004);
- nv_wo32(chan, 0x0214, 0x00000000);
-
+ nvkm_wo32(chan, 0x0210, 0x00080004);
+ nvkm_wo32(chan, 0x0214, 0x00000000);
bar->flush(bar);
+ nvkm_done(chan);
nvkm_wr32(device, 0x100cb8, (chan->addr + 0x1000) >> 8);
nvkm_wr32(device, 0x100cbc, 0x80000001);
break;
);
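
A note on the pattern this and the following hunks convert to: nvkm_kmap() maps the object for CPU access, nvkm_wo32()/nvkm_ro32() access 32-bit words at byte offsets, and nvkm_done() ends the mapping. A minimal sketch of the identity-map loop above under those assumptions (the helper name is illustrative, not part of the patch):

/* Illustrative sketch of the bracketed accessor pattern, mirroring the
 * identity-map loop above.  PTE encoding as implied by the hunk:
 * physical address >> 8 in the low bits, bit 0 = present; e.g. an
 * address of 0x00200000 yields a low PTE word of 0x00002001.
 */
static void
example_identity_map(struct nvkm_gpuobj *gpuobj, u64 base, u32 pages)
{
        u32 i;

        nvkm_kmap(gpuobj);                      /* map for CPU access */
        for (i = 0; i < pages; i++) {
                u64 pte = ((base + (u64)i * 4096) >> 8) | 1;
                nvkm_wo32(gpuobj, 0x2000 + i * 8, lower_32_bits(pte));
                nvkm_wo32(gpuobj, 0x2004 + i * 8, upper_32_bits(pte));
        }
        nvkm_done(gpuobj);                      /* flush and unmap */
}
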
- nv_wo32(chan, 0x8001c, 1);
- nv_wo32(chan, 0x80020, 0);
- nv_wo32(chan, 0x80028, 0);
- nv_wo32(chan, 0x8002c, 0);
+ nvkm_kmap(chan);
+ nvkm_wo32(chan, 0x8001c, 1);
+ nvkm_wo32(chan, 0x80020, 0);
+ nvkm_wo32(chan, 0x80028, 0);
+ nvkm_wo32(chan, 0x8002c, 0);
bar->flush(bar);
+ nvkm_done(chan);
} else {
nvkm_wr32(device, 0x409840, 0x80000000);
nvkm_wr32(device, 0x409500, 0x80000000 | chan->addr >> 12);
gr->data = kmalloc(gr->size, GFP_KERNEL);
if (gr->data) {
+ nvkm_kmap(chan);
for (i = 0; i < gr->size; i += 4)
- gr->data[i / 4] = nv_ro32(chan, 0x80000 + i);
+ gr->data[i / 4] = nvkm_ro32(chan, 0x80000 + i);
+ nvkm_done(chan);
ret = 0;
} else {
ret = -ENOMEM;
if (ctx->mode != NVKM_GRCTX_VALS)
return;
+ nvkm_kmap(obj);
offset += 0x0280/4;
for (i = 0; i < 16; i++, offset += 2)
- nv_wo32(obj, offset * 4, 0x3f800000);
+ nvkm_wo32(obj, offset * 4, 0x3f800000);
for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
for (i = 0; i < vs_nr_b0 * 6; i += 6)
- nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
+ nvkm_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
for (i = 0; i < vs_nr_b1 * 4; i += 4)
- nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
+ nvkm_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
}
+ nvkm_done(obj);
}
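
For orientation while reviewing these value fills: the magic numbers are IEEE-754 single-precision floats. Decoded below as a reviewer's aid, not as part of the patch:

/* IEEE-754 single-precision values recurring in these default fills:
 *   0x3f000000 = 0.5f     0x3f800000 = 1.0f
 *   0x40000000 = 2.0f     0xbf800000 = -1.0f
 *   0x4b7fffff = 16777215.0f (2^24 - 1)
 */
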
static void
struct nvkm_grctx ctx = {
.device = device,
.mode = NVKM_GRCTX_PROG,
- .data = ctxprog,
+ .ucode = ctxprog,
.ctxprog_max = 256,
};
NVKM_GRCTX_PROG,
NVKM_GRCTX_VALS
} mode;
- void *data;
+ u32 *ucode;
+ struct nvkm_gpuobj *data;
u32 ctxprog_max;
u32 ctxprog_len;
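
The field change above splits the old untyped data pointer by mode: ucode is the CPU buffer that receives ctxprog opcodes during the NVKM_GRCTX_PROG pass, while data is now a typed gpuobj written during the NVKM_GRCTX_VALS pass. A hedged sketch of the resulting two-pass usage; generate() stands in for the per-chipset generator and is not a real function:

/* Illustrative two-pass use of nvkm_grctx after the ucode/data split. */
u32 ctxprog[256];
struct nvkm_grctx ctx = {
        .device      = device,          /* assumed in scope */
        .mode        = NVKM_GRCTX_PROG,
        .ucode       = ctxprog,         /* CPU-side opcode buffer */
        .ctxprog_max = ARRAY_SIZE(ctxprog),
};

generate(&ctx);                 /* cp_out() fills ctx->ucode */

ctx.mode = NVKM_GRCTX_VALS;
ctx.data = grctx;               /* assumed struct nvkm_gpuobj * in scope */
generate(&ctx);                 /* value writes go through nvkm_wo32() */
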
static inline void
cp_out(struct nvkm_grctx *ctx, u32 inst)
{
- u32 *ctxprog = ctx->data;
+ u32 *ctxprog = ctx->ucode;
if (ctx->mode != NVKM_GRCTX_PROG)
return;
static inline void
cp_name(struct nvkm_grctx *ctx, int name)
{
- u32 *ctxprog = ctx->data;
+ u32 *ctxprog = ctx->ucode;
int i;
if (ctx->mode != NVKM_GRCTX_PROG)
reg = (reg - 0x00400000) / 4;
reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
- nv_wo32(ctx->data, reg * 4, val);
+ nvkm_kmap(ctx->data);
+ nvkm_wo32(ctx->data, reg * 4, val);
+ nvkm_done(ctx->data);
}
#endif
struct nvkm_grctx ctx = {
.device = device,
.mode = NVKM_GRCTX_PROG,
- .data = ctxprog,
+ .ucode = ctxprog,
.ctxprog_max = 512,
};
static void
dd_emit(struct nvkm_grctx *ctx, int num, u32 val) {
int i;
- if (val && ctx->mode == NVKM_GRCTX_VALS)
+ if (val && ctx->mode == NVKM_GRCTX_VALS) {
+ nvkm_kmap(ctx->data);
for (i = 0; i < num; i++)
- nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+ nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+ nvkm_done(ctx->data);
+ }
ctx->ctxvals_pos += num;
}
static void
xf_emit(struct nvkm_grctx *ctx, int num, u32 val) {
int i;
- if (val && ctx->mode == NVKM_GRCTX_VALS)
+ if (val && ctx->mode == NVKM_GRCTX_VALS) {
+ nvkm_kmap(ctx->data);
for (i = 0; i < num; i++)
- nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
+ nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
+ nvkm_done(ctx->data);
+ }
ctx->ctxvals_pos += num << 3;
}
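
A quick cross-check of the brace placement above: both emitters must still advance ctxvals_pos even when they skip the writes, so only the write loop may sit inside the new braces. The strides follow directly from the arithmetic:

/* Derived from the emitters above: with ctx->ctxvals_pos == 0x100,
 *   dd_emit(ctx, 3, v) writes words 0x100, 0x101, 0x102; pos becomes 0x103
 *   xf_emit(ctx, 3, v) writes words 0x100, 0x108, 0x110; pos becomes 0x118
 * Writes are skipped when val == 0 or mode != NVKM_GRCTX_VALS, but the
 * cursor always advances, which the hunks above preserve.
 */
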
struct gf100_gr_data *data = gr->mmio_data;
struct gf100_gr_mmio *mmio = gr->mmio_list;
struct gf100_gr_chan *chan;
+ struct nvkm_gpuobj *image;
int ret, i;
/* allocate memory for context, and fill with default values */
}
/* finally, fill in the mmio list and point the context at it */
+ nvkm_kmap(chan->mmio);
for (i = 0; mmio->addr && i < ARRAY_SIZE(gr->mmio_list); i++) {
u32 addr = mmio->addr;
u32 data = mmio->data;
data |= info >> mmio->shift;
}
- nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
- nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
+ nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
+ nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
mmio++;
}
+ nvkm_done(chan->mmio);
+ image = &chan->base.base.gpuobj;
+ nvkm_kmap(image);
for (i = 0; i < gr->size; i += 4)
- nv_wo32(chan, i, gr->data[i / 4]);
+ nvkm_wo32(image, i, gr->data[i / 4]);
if (!gr->firmware) {
- nv_wo32(chan, 0x00, chan->mmio_nr / 2);
- nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8);
+ nvkm_wo32(image, 0x00, chan->mmio_nr / 2);
+ nvkm_wo32(image, 0x04, chan->mmio_vma.offset >> 8);
} else {
- nv_wo32(chan, 0xf4, 0);
- nv_wo32(chan, 0xf8, 0);
- nv_wo32(chan, 0x10, chan->mmio_nr / 2);
- nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset));
- nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset));
- nv_wo32(chan, 0x1c, 1);
- nv_wo32(chan, 0x20, 0);
- nv_wo32(chan, 0x28, 0);
- nv_wo32(chan, 0x2c, 0);
+ nvkm_wo32(image, 0xf4, 0);
+ nvkm_wo32(image, 0xf8, 0);
+ nvkm_wo32(image, 0x10, chan->mmio_nr / 2);
+ nvkm_wo32(image, 0x14, lower_32_bits(chan->mmio_vma.offset));
+ nvkm_wo32(image, 0x18, upper_32_bits(chan->mmio_vma.offset));
+ nvkm_wo32(image, 0x1c, 1);
+ nvkm_wo32(image, 0x20, 0);
+ nvkm_wo32(image, 0x28, 0);
+ nvkm_wo32(image, 0x2c, 0);
}
+ nvkm_done(image);
return 0;
}
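
The writes in this hunk imply the layout the context image expects; summarized below as a reviewer's aid, derived from the code rather than from separate documentation:

/* Layout implied by the writes above:
 *   chan->mmio: chan->mmio_nr/2 pairs of (register, value) words
 *   context image, non-firmware path:
 *     0x00 = pair count, 0x04 = mmio list VA >> 8
 *   context image, firmware path:
 *     0x10 = pair count, 0x14/0x18 = mmio list VA (lo/hi)
 */
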
if (ret)
return ret;
- for (i = 0; i < 0x1000; i += 4) {
- nv_wo32(gr->unk4188b4, i, 0x00000010);
- nv_wo32(gr->unk4188b8, i, 0x00000010);
- }
+ nvkm_kmap(gr->unk4188b4);
+ for (i = 0; i < 0x1000; i += 4)
+ nvkm_wo32(gr->unk4188b4, i, 0x00000010);
+ nvkm_done(gr->unk4188b4);
+
+ nvkm_kmap(gr->unk4188b8);
+ for (i = 0; i < 0x1000; i += 4)
+ nvkm_wo32(gr->unk4188b8, i, 0x00000010);
+ nvkm_done(gr->unk4188b8);
gr->rop_nr = (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16;
gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f;
*/
static void
-nv04_gr_set_ctx1(struct nvkm_object *object, u32 mask, u32 value)
+nv04_gr_set_ctx1(struct nvkm_object *obj, u32 mask, u32 value)
{
- struct nv04_gr *gr = (void *)object->engine;
+ struct nvkm_gpuobj *object = container_of(obj, typeof(*object), object);
+ struct nv04_gr *gr = (void *)object->object.engine;
struct nvkm_device *device = gr->base.engine.subdev.device;
int subc = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
u32 tmp;
- tmp = nv_ro32(object, 0x00);
+ nvkm_kmap(object);
+ tmp = nvkm_ro32(object, 0x00);
tmp &= ~mask;
tmp |= value;
- nv_wo32(object, 0x00, tmp);
+ nvkm_wo32(object, 0x00, tmp);
+ nvkm_done(object);
nvkm_wr32(device, NV04_PGRAPH_CTX_SWITCH1, tmp);
nvkm_wr32(device, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
}
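
The helpers in this file now take the bare nvkm_object and recover the enclosing gpuobj themselves. A minimal sketch of the idiom, assuming struct nvkm_gpuobj embeds its nvkm_object as a member named object, which is what the container_of() calls in this patch rely on:

/* Illustrative: recover the embedding gpuobj from its nvkm_object member. */
static inline struct nvkm_gpuobj *
to_gpuobj(struct nvkm_object *obj)
{
        return container_of(obj, struct nvkm_gpuobj, object);
}
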
static void
-nv04_gr_set_ctx_val(struct nvkm_object *object, u32 mask, u32 value)
+nv04_gr_set_ctx_val(struct nvkm_object *obj, u32 mask, u32 value)
{
+ struct nvkm_gpuobj *object = container_of(obj, typeof(*object), object);
int class, op, valid = 1;
u32 tmp, ctx1;
- ctx1 = nv_ro32(object, 0x00);
+ nvkm_kmap(object);
+ ctx1 = nvkm_ro32(object, 0x00);
class = ctx1 & 0xff;
op = (ctx1 >> 15) & 7;
- tmp = nv_ro32(object, 0x0c);
+ tmp = nvkm_ro32(object, 0x0c);
tmp &= ~mask;
tmp |= value;
- nv_wo32(object, 0x0c, tmp);
+ nvkm_wo32(object, 0x0c, tmp);
+ nvkm_done(object);
/* check for valid surf2d/surf_dst/surf_color */
if (!(tmp & 0x02000000))
break;
}
- nv04_gr_set_ctx1(object, 0x01000000, valid << 24);
+ nv04_gr_set_ctx1(obj, 0x01000000, valid << 24);
}
static int
-nv04_gr_mthd_set_operation(struct nvkm_object *object, u32 mthd,
+nv04_gr_mthd_set_operation(struct nvkm_object *obj, u32 mthd,
void *args, u32 size)
{
- u32 class = nv_ro32(object, 0) & 0xff;
+ struct nvkm_gpuobj *object = container_of(obj, typeof(*object), object);
+ u32 class = nvkm_ro32(object, 0) & 0xff;
u32 data = *(u32 *)args;
if (data > 5)
return 1;
/* Old versions of the objects only accept the first three operations. */
if (data > 2 && class < 0x40)
return 1;
- nv04_gr_set_ctx1(object, 0x00038000, data << 15);
+ nv04_gr_set_ctx1(obj, 0x00038000, data << 15);
/* changing operation changes set of objects needed for validation */
- nv04_gr_set_ctx_val(object, 0, 0);
+ nv04_gr_set_ctx_val(obj, 0, 0);
return 0;
}
if (ret)
return ret;
- nv_wo32(obj, 0x00, nv_mclass(obj));
+ nvkm_kmap(obj);
+ nvkm_wo32(obj, 0x00, nv_mclass(obj));
#ifdef __BIG_ENDIAN
- nv_mo32(obj, 0x00, 0x00080000, 0x00080000);
+ nvkm_mo32(obj, 0x00, 0x00080000, 0x00080000);
#endif
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
- nv_wo32(obj, 0x0c, 0x00000000);
+ nvkm_wo32(obj, 0x04, 0x00000000);
+ nvkm_wo32(obj, 0x08, 0x00000000);
+ nvkm_wo32(obj, 0x0c, 0x00000000);
+ nvkm_done(obj);
return 0;
}
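
The big-endian branch uses the masked accessor; its effect, judging from the open-coded read-modify-write sequences in nv04_gr_set_ctx1()/nv04_gr_set_ctx_val() earlier in this patch, amounts to the following sketch:

/* Assumed semantics of nvkm_mo32(obj, offs, mask, data), mirroring the
 * open-coded sequences above:
 */
u32 tmp = nvkm_ro32(obj, offs);
tmp &= ~mask;
tmp |= data;
nvkm_wo32(obj, offs, tmp);
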
struct nvkm_object **pobject)
{
struct nv20_gr_chan *chan;
+ struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x37f0,
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
-
- nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
- nv_wo32(chan, 0x033c, 0xffff0000);
- nv_wo32(chan, 0x03a0, 0x0fff0000);
- nv_wo32(chan, 0x03a4, 0x0fff0000);
- nv_wo32(chan, 0x047c, 0x00000101);
- nv_wo32(chan, 0x0490, 0x00000111);
- nv_wo32(chan, 0x04a8, 0x44400000);
+ image = &chan->base.base.gpuobj;
+
+ nvkm_kmap(image);
+ nvkm_wo32(image, 0x0000, 0x00000001 | (chan->chid << 24));
+ nvkm_wo32(image, 0x033c, 0xffff0000);
+ nvkm_wo32(image, 0x03a0, 0x0fff0000);
+ nvkm_wo32(image, 0x03a4, 0x0fff0000);
+ nvkm_wo32(image, 0x047c, 0x00000101);
+ nvkm_wo32(image, 0x0490, 0x00000111);
+ nvkm_wo32(image, 0x04a8, 0x44400000);
for (i = 0x04d4; i <= 0x04e0; i += 4)
- nv_wo32(chan, i, 0x00030303);
+ nvkm_wo32(image, i, 0x00030303);
for (i = 0x04f4; i <= 0x0500; i += 4)
- nv_wo32(chan, i, 0x00080000);
+ nvkm_wo32(image, i, 0x00080000);
for (i = 0x050c; i <= 0x0518; i += 4)
- nv_wo32(chan, i, 0x01012000);
+ nvkm_wo32(image, i, 0x01012000);
for (i = 0x051c; i <= 0x0528; i += 4)
- nv_wo32(chan, i, 0x000105b8);
+ nvkm_wo32(image, i, 0x000105b8);
for (i = 0x052c; i <= 0x0538; i += 4)
- nv_wo32(chan, i, 0x00080008);
+ nvkm_wo32(image, i, 0x00080008);
for (i = 0x055c; i <= 0x0598; i += 4)
- nv_wo32(chan, i, 0x07ff0000);
- nv_wo32(chan, 0x05a4, 0x4b7fffff);
- nv_wo32(chan, 0x05fc, 0x00000001);
- nv_wo32(chan, 0x0604, 0x00004000);
- nv_wo32(chan, 0x0610, 0x00000001);
- nv_wo32(chan, 0x0618, 0x00040000);
- nv_wo32(chan, 0x061c, 0x00010000);
+ nvkm_wo32(image, i, 0x07ff0000);
+ nvkm_wo32(image, 0x05a4, 0x4b7fffff);
+ nvkm_wo32(image, 0x05fc, 0x00000001);
+ nvkm_wo32(image, 0x0604, 0x00004000);
+ nvkm_wo32(image, 0x0610, 0x00000001);
+ nvkm_wo32(image, 0x0618, 0x00040000);
+ nvkm_wo32(image, 0x061c, 0x00010000);
for (i = 0x1c1c; i <= 0x248c; i += 16) {
- nv_wo32(chan, (i + 0), 0x10700ff9);
- nv_wo32(chan, (i + 4), 0x0436086c);
- nv_wo32(chan, (i + 8), 0x000c001b);
+ nvkm_wo32(image, (i + 0), 0x10700ff9);
+ nvkm_wo32(image, (i + 4), 0x0436086c);
+ nvkm_wo32(image, (i + 8), 0x000c001b);
}
- nv_wo32(chan, 0x281c, 0x3f800000);
- nv_wo32(chan, 0x2830, 0x3f800000);
- nv_wo32(chan, 0x285c, 0x40000000);
- nv_wo32(chan, 0x2860, 0x3f800000);
- nv_wo32(chan, 0x2864, 0x3f000000);
- nv_wo32(chan, 0x286c, 0x40000000);
- nv_wo32(chan, 0x2870, 0x3f800000);
- nv_wo32(chan, 0x2878, 0xbf800000);
- nv_wo32(chan, 0x2880, 0xbf800000);
- nv_wo32(chan, 0x34a4, 0x000fe000);
- nv_wo32(chan, 0x3530, 0x000003f8);
- nv_wo32(chan, 0x3540, 0x002fe000);
+ nvkm_wo32(image, 0x281c, 0x3f800000);
+ nvkm_wo32(image, 0x2830, 0x3f800000);
+ nvkm_wo32(image, 0x285c, 0x40000000);
+ nvkm_wo32(image, 0x2860, 0x3f800000);
+ nvkm_wo32(image, 0x2864, 0x3f000000);
+ nvkm_wo32(image, 0x286c, 0x40000000);
+ nvkm_wo32(image, 0x2870, 0x3f800000);
+ nvkm_wo32(image, 0x2878, 0xbf800000);
+ nvkm_wo32(image, 0x2880, 0xbf800000);
+ nvkm_wo32(image, 0x34a4, 0x000fe000);
+ nvkm_wo32(image, 0x3530, 0x000003f8);
+ nvkm_wo32(image, 0x3540, 0x002fe000);
for (i = 0x355c; i <= 0x3578; i += 4)
- nv_wo32(chan, i, 0x001c527c);
+ nvkm_wo32(image, i, 0x001c527c);
+ nvkm_done(image);
return 0;
}
if (ret)
return ret;
- nv_wo32(gr->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
+ nvkm_kmap(gr->ctxtab);
+ nvkm_wo32(gr->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
+ nvkm_done(gr->ctxtab);
return 0;
}
}
nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
- nv_wo32(gr->ctxtab, chan->chid * 4, 0x00000000);
+ nvkm_kmap(gr->ctxtab);
+ nvkm_wo32(gr->ctxtab, chan->chid * 4, 0x00000000);
+ nvkm_done(gr->ctxtab);
return nvkm_gr_context_fini(&chan->base, suspend);
}
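
These two small hunks bind and unbind a channel in PGRAPH's context table. The format they imply: one 32-bit slot per channel id, holding the context's instance address shifted right by four. Sketched below with an illustrative inst value in scope:

/* Illustrative: per-chid slots in gr->ctxtab as used above. */
nvkm_kmap(gr->ctxtab);
nvkm_wo32(gr->ctxtab, chid * 4, inst >> 4);     /* bind on init */
nvkm_wo32(gr->ctxtab, chid * 4, 0x00000000);    /* unbind on fini */
nvkm_done(gr->ctxtab);
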
struct nvkm_object **pobject)
{
struct nv20_gr_chan *chan;
+ struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x3724,
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
+ image = &chan->base.base.gpuobj;
- nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
- nv_wo32(chan, 0x035c, 0xffff0000);
- nv_wo32(chan, 0x03c0, 0x0fff0000);
- nv_wo32(chan, 0x03c4, 0x0fff0000);
- nv_wo32(chan, 0x049c, 0x00000101);
- nv_wo32(chan, 0x04b0, 0x00000111);
- nv_wo32(chan, 0x04c8, 0x00000080);
- nv_wo32(chan, 0x04cc, 0xffff0000);
- nv_wo32(chan, 0x04d0, 0x00000001);
- nv_wo32(chan, 0x04e4, 0x44400000);
- nv_wo32(chan, 0x04fc, 0x4b800000);
+ nvkm_kmap(image);
+ nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
+ nvkm_wo32(image, 0x035c, 0xffff0000);
+ nvkm_wo32(image, 0x03c0, 0x0fff0000);
+ nvkm_wo32(image, 0x03c4, 0x0fff0000);
+ nvkm_wo32(image, 0x049c, 0x00000101);
+ nvkm_wo32(image, 0x04b0, 0x00000111);
+ nvkm_wo32(image, 0x04c8, 0x00000080);
+ nvkm_wo32(image, 0x04cc, 0xffff0000);
+ nvkm_wo32(image, 0x04d0, 0x00000001);
+ nvkm_wo32(image, 0x04e4, 0x44400000);
+ nvkm_wo32(image, 0x04fc, 0x4b800000);
for (i = 0x0510; i <= 0x051c; i += 4)
- nv_wo32(chan, i, 0x00030303);
+ nvkm_wo32(image, i, 0x00030303);
for (i = 0x0530; i <= 0x053c; i += 4)
- nv_wo32(chan, i, 0x00080000);
+ nvkm_wo32(image, i, 0x00080000);
for (i = 0x0548; i <= 0x0554; i += 4)
- nv_wo32(chan, i, 0x01012000);
+ nvkm_wo32(image, i, 0x01012000);
for (i = 0x0558; i <= 0x0564; i += 4)
- nv_wo32(chan, i, 0x000105b8);
+ nvkm_wo32(image, i, 0x000105b8);
for (i = 0x0568; i <= 0x0574; i += 4)
- nv_wo32(chan, i, 0x00080008);
+ nvkm_wo32(image, i, 0x00080008);
for (i = 0x0598; i <= 0x05d4; i += 4)
- nv_wo32(chan, i, 0x07ff0000);
- nv_wo32(chan, 0x05e0, 0x4b7fffff);
- nv_wo32(chan, 0x0620, 0x00000080);
- nv_wo32(chan, 0x0624, 0x30201000);
- nv_wo32(chan, 0x0628, 0x70605040);
- nv_wo32(chan, 0x062c, 0xb0a09080);
- nv_wo32(chan, 0x0630, 0xf0e0d0c0);
- nv_wo32(chan, 0x0664, 0x00000001);
- nv_wo32(chan, 0x066c, 0x00004000);
- nv_wo32(chan, 0x0678, 0x00000001);
- nv_wo32(chan, 0x0680, 0x00040000);
- nv_wo32(chan, 0x0684, 0x00010000);
+ nvkm_wo32(image, i, 0x07ff0000);
+ nvkm_wo32(image, 0x05e0, 0x4b7fffff);
+ nvkm_wo32(image, 0x0620, 0x00000080);
+ nvkm_wo32(image, 0x0624, 0x30201000);
+ nvkm_wo32(image, 0x0628, 0x70605040);
+ nvkm_wo32(image, 0x062c, 0xb0a09080);
+ nvkm_wo32(image, 0x0630, 0xf0e0d0c0);
+ nvkm_wo32(image, 0x0664, 0x00000001);
+ nvkm_wo32(image, 0x066c, 0x00004000);
+ nvkm_wo32(image, 0x0678, 0x00000001);
+ nvkm_wo32(image, 0x0680, 0x00040000);
+ nvkm_wo32(image, 0x0684, 0x00010000);
for (i = 0x1b04; i <= 0x2374; i += 16) {
- nv_wo32(chan, (i + 0), 0x10700ff9);
- nv_wo32(chan, (i + 4), 0x0436086c);
- nv_wo32(chan, (i + 8), 0x000c001b);
+ nvkm_wo32(image, (i + 0), 0x10700ff9);
+ nvkm_wo32(image, (i + 4), 0x0436086c);
+ nvkm_wo32(image, (i + 8), 0x000c001b);
}
- nv_wo32(chan, 0x2704, 0x3f800000);
- nv_wo32(chan, 0x2718, 0x3f800000);
- nv_wo32(chan, 0x2744, 0x40000000);
- nv_wo32(chan, 0x2748, 0x3f800000);
- nv_wo32(chan, 0x274c, 0x3f000000);
- nv_wo32(chan, 0x2754, 0x40000000);
- nv_wo32(chan, 0x2758, 0x3f800000);
- nv_wo32(chan, 0x2760, 0xbf800000);
- nv_wo32(chan, 0x2768, 0xbf800000);
- nv_wo32(chan, 0x308c, 0x000fe000);
- nv_wo32(chan, 0x3108, 0x000003f8);
- nv_wo32(chan, 0x3468, 0x002fe000);
+ nvkm_wo32(image, 0x2704, 0x3f800000);
+ nvkm_wo32(image, 0x2718, 0x3f800000);
+ nvkm_wo32(image, 0x2744, 0x40000000);
+ nvkm_wo32(image, 0x2748, 0x3f800000);
+ nvkm_wo32(image, 0x274c, 0x3f000000);
+ nvkm_wo32(image, 0x2754, 0x40000000);
+ nvkm_wo32(image, 0x2758, 0x3f800000);
+ nvkm_wo32(image, 0x2760, 0xbf800000);
+ nvkm_wo32(image, 0x2768, 0xbf800000);
+ nvkm_wo32(image, 0x308c, 0x000fe000);
+ nvkm_wo32(image, 0x3108, 0x000003f8);
+ nvkm_wo32(image, 0x3468, 0x002fe000);
for (i = 0x3484; i <= 0x34a0; i += 4)
- nv_wo32(chan, i, 0x001c527c);
+ nvkm_wo32(image, i, 0x001c527c);
+ nvkm_done(image);
return 0;
}
struct nvkm_object **pobject)
{
struct nv20_gr_chan *chan;
+ struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x36b0,
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
+ image = &chan->base.base.gpuobj;
- nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
- nv_wo32(chan, 0x033c, 0xffff0000);
- nv_wo32(chan, 0x03a0, 0x0fff0000);
- nv_wo32(chan, 0x03a4, 0x0fff0000);
- nv_wo32(chan, 0x047c, 0x00000101);
- nv_wo32(chan, 0x0490, 0x00000111);
- nv_wo32(chan, 0x04a8, 0x44400000);
+ nvkm_kmap(image);
+ nvkm_wo32(image, 0x0000, 0x00000001 | (chan->chid << 24));
+ nvkm_wo32(image, 0x033c, 0xffff0000);
+ nvkm_wo32(image, 0x03a0, 0x0fff0000);
+ nvkm_wo32(image, 0x03a4, 0x0fff0000);
+ nvkm_wo32(image, 0x047c, 0x00000101);
+ nvkm_wo32(image, 0x0490, 0x00000111);
+ nvkm_wo32(image, 0x04a8, 0x44400000);
for (i = 0x04d4; i <= 0x04e0; i += 4)
- nv_wo32(chan, i, 0x00030303);
+ nvkm_wo32(image, i, 0x00030303);
for (i = 0x04f4; i <= 0x0500; i += 4)
- nv_wo32(chan, i, 0x00080000);
+ nvkm_wo32(image, i, 0x00080000);
for (i = 0x050c; i <= 0x0518; i += 4)
- nv_wo32(chan, i, 0x01012000);
+ nvkm_wo32(image, i, 0x01012000);
for (i = 0x051c; i <= 0x0528; i += 4)
- nv_wo32(chan, i, 0x000105b8);
+ nvkm_wo32(image, i, 0x000105b8);
for (i = 0x052c; i <= 0x0538; i += 4)
- nv_wo32(chan, i, 0x00080008);
+ nvkm_wo32(image, i, 0x00080008);
for (i = 0x055c; i <= 0x0598; i += 4)
- nv_wo32(chan, i, 0x07ff0000);
- nv_wo32(chan, 0x05a4, 0x4b7fffff);
- nv_wo32(chan, 0x05fc, 0x00000001);
- nv_wo32(chan, 0x0604, 0x00004000);
- nv_wo32(chan, 0x0610, 0x00000001);
- nv_wo32(chan, 0x0618, 0x00040000);
- nv_wo32(chan, 0x061c, 0x00010000);
+ nvkm_wo32(image, i, 0x07ff0000);
+ nvkm_wo32(image, 0x05a4, 0x4b7fffff);
+ nvkm_wo32(image, 0x05fc, 0x00000001);
+ nvkm_wo32(image, 0x0604, 0x00004000);
+ nvkm_wo32(image, 0x0610, 0x00000001);
+ nvkm_wo32(image, 0x0618, 0x00040000);
+ nvkm_wo32(image, 0x061c, 0x00010000);
for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
- nv_wo32(chan, (i + 0), 0x10700ff9);
- nv_wo32(chan, (i + 4), 0x0436086c);
- nv_wo32(chan, (i + 8), 0x000c001b);
+ nvkm_wo32(image, (i + 0), 0x10700ff9);
+ nvkm_wo32(image, (i + 4), 0x0436086c);
+ nvkm_wo32(image, (i + 8), 0x000c001b);
}
- nv_wo32(chan, 0x269c, 0x3f800000);
- nv_wo32(chan, 0x26b0, 0x3f800000);
- nv_wo32(chan, 0x26dc, 0x40000000);
- nv_wo32(chan, 0x26e0, 0x3f800000);
- nv_wo32(chan, 0x26e4, 0x3f000000);
- nv_wo32(chan, 0x26ec, 0x40000000);
- nv_wo32(chan, 0x26f0, 0x3f800000);
- nv_wo32(chan, 0x26f8, 0xbf800000);
- nv_wo32(chan, 0x2700, 0xbf800000);
- nv_wo32(chan, 0x3024, 0x000fe000);
- nv_wo32(chan, 0x30a0, 0x000003f8);
- nv_wo32(chan, 0x33fc, 0x002fe000);
+ nvkm_wo32(image, 0x269c, 0x3f800000);
+ nvkm_wo32(image, 0x26b0, 0x3f800000);
+ nvkm_wo32(image, 0x26dc, 0x40000000);
+ nvkm_wo32(image, 0x26e0, 0x3f800000);
+ nvkm_wo32(image, 0x26e4, 0x3f000000);
+ nvkm_wo32(image, 0x26ec, 0x40000000);
+ nvkm_wo32(image, 0x26f0, 0x3f800000);
+ nvkm_wo32(image, 0x26f8, 0xbf800000);
+ nvkm_wo32(image, 0x2700, 0xbf800000);
+ nvkm_wo32(image, 0x3024, 0x000fe000);
+ nvkm_wo32(image, 0x30a0, 0x000003f8);
+ nvkm_wo32(image, 0x33fc, 0x002fe000);
for (i = 0x341c; i <= 0x3438; i += 4)
- nv_wo32(chan, i, 0x001c527c);
+ nvkm_wo32(image, i, 0x001c527c);
+ nvkm_done(image);
return 0;
}
struct nvkm_object **pobject)
{
struct nv20_gr_chan *chan;
+ struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x5f48,
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
-
- nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
- nv_wo32(chan, 0x0410, 0x00000101);
- nv_wo32(chan, 0x0424, 0x00000111);
- nv_wo32(chan, 0x0428, 0x00000060);
- nv_wo32(chan, 0x0444, 0x00000080);
- nv_wo32(chan, 0x0448, 0xffff0000);
- nv_wo32(chan, 0x044c, 0x00000001);
- nv_wo32(chan, 0x0460, 0x44400000);
- nv_wo32(chan, 0x048c, 0xffff0000);
+ image = &chan->base.base.gpuobj;
+
+ nvkm_kmap(image);
+ nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
+ nvkm_wo32(image, 0x0410, 0x00000101);
+ nvkm_wo32(image, 0x0424, 0x00000111);
+ nvkm_wo32(image, 0x0428, 0x00000060);
+ nvkm_wo32(image, 0x0444, 0x00000080);
+ nvkm_wo32(image, 0x0448, 0xffff0000);
+ nvkm_wo32(image, 0x044c, 0x00000001);
+ nvkm_wo32(image, 0x0460, 0x44400000);
+ nvkm_wo32(image, 0x048c, 0xffff0000);
for (i = 0x04e0; i < 0x04e8; i += 4)
- nv_wo32(chan, i, 0x0fff0000);
- nv_wo32(chan, 0x04ec, 0x00011100);
+ nvkm_wo32(image, i, 0x0fff0000);
+ nvkm_wo32(image, 0x04ec, 0x00011100);
for (i = 0x0508; i < 0x0548; i += 4)
- nv_wo32(chan, i, 0x07ff0000);
- nv_wo32(chan, 0x0550, 0x4b7fffff);
- nv_wo32(chan, 0x058c, 0x00000080);
- nv_wo32(chan, 0x0590, 0x30201000);
- nv_wo32(chan, 0x0594, 0x70605040);
- nv_wo32(chan, 0x0598, 0xb8a89888);
- nv_wo32(chan, 0x059c, 0xf8e8d8c8);
- nv_wo32(chan, 0x05b0, 0xb0000000);
+ nvkm_wo32(image, i, 0x07ff0000);
+ nvkm_wo32(image, 0x0550, 0x4b7fffff);
+ nvkm_wo32(image, 0x058c, 0x00000080);
+ nvkm_wo32(image, 0x0590, 0x30201000);
+ nvkm_wo32(image, 0x0594, 0x70605040);
+ nvkm_wo32(image, 0x0598, 0xb8a89888);
+ nvkm_wo32(image, 0x059c, 0xf8e8d8c8);
+ nvkm_wo32(image, 0x05b0, 0xb0000000);
for (i = 0x0600; i < 0x0640; i += 4)
- nv_wo32(chan, i, 0x00010588);
+ nvkm_wo32(image, i, 0x00010588);
for (i = 0x0640; i < 0x0680; i += 4)
- nv_wo32(chan, i, 0x00030303);
+ nvkm_wo32(image, i, 0x00030303);
for (i = 0x06c0; i < 0x0700; i += 4)
- nv_wo32(chan, i, 0x0008aae4);
+ nvkm_wo32(image, i, 0x0008aae4);
for (i = 0x0700; i < 0x0740; i += 4)
- nv_wo32(chan, i, 0x01012000);
+ nvkm_wo32(image, i, 0x01012000);
for (i = 0x0740; i < 0x0780; i += 4)
- nv_wo32(chan, i, 0x00080008);
- nv_wo32(chan, 0x085c, 0x00040000);
- nv_wo32(chan, 0x0860, 0x00010000);
+ nvkm_wo32(image, i, 0x00080008);
+ nvkm_wo32(image, 0x085c, 0x00040000);
+ nvkm_wo32(image, 0x0860, 0x00010000);
for (i = 0x0864; i < 0x0874; i += 4)
- nv_wo32(chan, i, 0x00040004);
+ nvkm_wo32(image, i, 0x00040004);
for (i = 0x1f18; i <= 0x3088 ; i += 16) {
- nv_wo32(chan, i + 0, 0x10700ff9);
- nv_wo32(chan, i + 1, 0x0436086c);
- nv_wo32(chan, i + 2, 0x000c001b);
+ nvkm_wo32(image, i + 0, 0x10700ff9);
+ nvkm_wo32(image, i + 1, 0x0436086c);
+ nvkm_wo32(image, i + 2, 0x000c001b);
}
for (i = 0x30b8; i < 0x30c8; i += 4)
- nv_wo32(chan, i, 0x0000ffff);
- nv_wo32(chan, 0x344c, 0x3f800000);
- nv_wo32(chan, 0x3808, 0x3f800000);
- nv_wo32(chan, 0x381c, 0x3f800000);
- nv_wo32(chan, 0x3848, 0x40000000);
- nv_wo32(chan, 0x384c, 0x3f800000);
- nv_wo32(chan, 0x3850, 0x3f000000);
- nv_wo32(chan, 0x3858, 0x40000000);
- nv_wo32(chan, 0x385c, 0x3f800000);
- nv_wo32(chan, 0x3864, 0xbf800000);
- nv_wo32(chan, 0x386c, 0xbf800000);
+ nvkm_wo32(image, i, 0x0000ffff);
+ nvkm_wo32(image, 0x344c, 0x3f800000);
+ nvkm_wo32(image, 0x3808, 0x3f800000);
+ nvkm_wo32(image, 0x381c, 0x3f800000);
+ nvkm_wo32(image, 0x3848, 0x40000000);
+ nvkm_wo32(image, 0x384c, 0x3f800000);
+ nvkm_wo32(image, 0x3850, 0x3f000000);
+ nvkm_wo32(image, 0x3858, 0x40000000);
+ nvkm_wo32(image, 0x385c, 0x3f800000);
+ nvkm_wo32(image, 0x3864, 0xbf800000);
+ nvkm_wo32(image, 0x386c, 0xbf800000);
+ nvkm_done(image);
return 0;
}
struct nvkm_object **pobject)
{
struct nv20_gr_chan *chan;
+ struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x46dc,
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
+ image = &chan->base.base.gpuobj;
- nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
- nv_wo32(chan, 0x040c, 0x01000101);
- nv_wo32(chan, 0x0420, 0x00000111);
- nv_wo32(chan, 0x0424, 0x00000060);
- nv_wo32(chan, 0x0440, 0x00000080);
- nv_wo32(chan, 0x0444, 0xffff0000);
- nv_wo32(chan, 0x0448, 0x00000001);
- nv_wo32(chan, 0x045c, 0x44400000);
- nv_wo32(chan, 0x0480, 0xffff0000);
+ nvkm_kmap(image);
+ nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
+ nvkm_wo32(image, 0x040c, 0x01000101);
+ nvkm_wo32(image, 0x0420, 0x00000111);
+ nvkm_wo32(image, 0x0424, 0x00000060);
+ nvkm_wo32(image, 0x0440, 0x00000080);
+ nvkm_wo32(image, 0x0444, 0xffff0000);
+ nvkm_wo32(image, 0x0448, 0x00000001);
+ nvkm_wo32(image, 0x045c, 0x44400000);
+ nvkm_wo32(image, 0x0480, 0xffff0000);
for (i = 0x04d4; i < 0x04dc; i += 4)
- nv_wo32(chan, i, 0x0fff0000);
- nv_wo32(chan, 0x04e0, 0x00011100);
+ nvkm_wo32(image, i, 0x0fff0000);
+ nvkm_wo32(image, 0x04e0, 0x00011100);
for (i = 0x04fc; i < 0x053c; i += 4)
- nv_wo32(chan, i, 0x07ff0000);
- nv_wo32(chan, 0x0544, 0x4b7fffff);
- nv_wo32(chan, 0x057c, 0x00000080);
- nv_wo32(chan, 0x0580, 0x30201000);
- nv_wo32(chan, 0x0584, 0x70605040);
- nv_wo32(chan, 0x0588, 0xb8a89888);
- nv_wo32(chan, 0x058c, 0xf8e8d8c8);
- nv_wo32(chan, 0x05a0, 0xb0000000);
+ nvkm_wo32(image, i, 0x07ff0000);
+ nvkm_wo32(image, 0x0544, 0x4b7fffff);
+ nvkm_wo32(image, 0x057c, 0x00000080);
+ nvkm_wo32(image, 0x0580, 0x30201000);
+ nvkm_wo32(image, 0x0584, 0x70605040);
+ nvkm_wo32(image, 0x0588, 0xb8a89888);
+ nvkm_wo32(image, 0x058c, 0xf8e8d8c8);
+ nvkm_wo32(image, 0x05a0, 0xb0000000);
for (i = 0x05f0; i < 0x0630; i += 4)
- nv_wo32(chan, i, 0x00010588);
+ nvkm_wo32(image, i, 0x00010588);
for (i = 0x0630; i < 0x0670; i += 4)
- nv_wo32(chan, i, 0x00030303);
+ nvkm_wo32(image, i, 0x00030303);
for (i = 0x06b0; i < 0x06f0; i += 4)
- nv_wo32(chan, i, 0x0008aae4);
+ nvkm_wo32(image, i, 0x0008aae4);
for (i = 0x06f0; i < 0x0730; i += 4)
- nv_wo32(chan, i, 0x01012000);
+ nvkm_wo32(image, i, 0x01012000);
for (i = 0x0730; i < 0x0770; i += 4)
- nv_wo32(chan, i, 0x00080008);
- nv_wo32(chan, 0x0850, 0x00040000);
- nv_wo32(chan, 0x0854, 0x00010000);
+ nvkm_wo32(image, i, 0x00080008);
+ nvkm_wo32(image, 0x0850, 0x00040000);
+ nvkm_wo32(image, 0x0854, 0x00010000);
for (i = 0x0858; i < 0x0868; i += 4)
- nv_wo32(chan, i, 0x00040004);
+ nvkm_wo32(image, i, 0x00040004);
for (i = 0x15ac; i <= 0x271c ; i += 16) {
- nv_wo32(chan, i + 0, 0x10700ff9);
- nv_wo32(chan, i + 1, 0x0436086c);
- nv_wo32(chan, i + 2, 0x000c001b);
+ nvkm_wo32(image, i + 0, 0x10700ff9);
+ nvkm_wo32(image, i + 1, 0x0436086c);
+ nvkm_wo32(image, i + 2, 0x000c001b);
}
for (i = 0x274c; i < 0x275c; i += 4)
- nv_wo32(chan, i, 0x0000ffff);
- nv_wo32(chan, 0x2ae0, 0x3f800000);
- nv_wo32(chan, 0x2e9c, 0x3f800000);
- nv_wo32(chan, 0x2eb0, 0x3f800000);
- nv_wo32(chan, 0x2edc, 0x40000000);
- nv_wo32(chan, 0x2ee0, 0x3f800000);
- nv_wo32(chan, 0x2ee4, 0x3f000000);
- nv_wo32(chan, 0x2eec, 0x40000000);
- nv_wo32(chan, 0x2ef0, 0x3f800000);
- nv_wo32(chan, 0x2ef8, 0xbf800000);
- nv_wo32(chan, 0x2f00, 0xbf800000);
+ nvkm_wo32(image, i, 0x0000ffff);
+ nvkm_wo32(image, 0x2ae0, 0x3f800000);
+ nvkm_wo32(image, 0x2e9c, 0x3f800000);
+ nvkm_wo32(image, 0x2eb0, 0x3f800000);
+ nvkm_wo32(image, 0x2edc, 0x40000000);
+ nvkm_wo32(image, 0x2ee0, 0x3f800000);
+ nvkm_wo32(image, 0x2ee4, 0x3f000000);
+ nvkm_wo32(image, 0x2eec, 0x40000000);
+ nvkm_wo32(image, 0x2ef0, 0x3f800000);
+ nvkm_wo32(image, 0x2ef8, 0xbf800000);
+ nvkm_wo32(image, 0x2f00, 0xbf800000);
+ nvkm_done(image);
return 0;
}
struct nvkm_object **pobject)
{
struct nv20_gr_chan *chan;
+ struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x577c,
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
+ image = &chan->base.base.gpuobj;
- nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
- nv_wo32(chan, 0x040c, 0x00000101);
- nv_wo32(chan, 0x0420, 0x00000111);
- nv_wo32(chan, 0x0424, 0x00000060);
- nv_wo32(chan, 0x0440, 0x00000080);
- nv_wo32(chan, 0x0444, 0xffff0000);
- nv_wo32(chan, 0x0448, 0x00000001);
- nv_wo32(chan, 0x045c, 0x44400000);
- nv_wo32(chan, 0x0488, 0xffff0000);
+ nvkm_kmap(image);
+ nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
+ nvkm_wo32(image, 0x040c, 0x00000101);
+ nvkm_wo32(image, 0x0420, 0x00000111);
+ nvkm_wo32(image, 0x0424, 0x00000060);
+ nvkm_wo32(image, 0x0440, 0x00000080);
+ nvkm_wo32(image, 0x0444, 0xffff0000);
+ nvkm_wo32(image, 0x0448, 0x00000001);
+ nvkm_wo32(image, 0x045c, 0x44400000);
+ nvkm_wo32(image, 0x0488, 0xffff0000);
for (i = 0x04dc; i < 0x04e4; i += 4)
- nv_wo32(chan, i, 0x0fff0000);
- nv_wo32(chan, 0x04e8, 0x00011100);
+ nvkm_wo32(image, i, 0x0fff0000);
+ nvkm_wo32(image, 0x04e8, 0x00011100);
for (i = 0x0504; i < 0x0544; i += 4)
- nv_wo32(chan, i, 0x07ff0000);
- nv_wo32(chan, 0x054c, 0x4b7fffff);
- nv_wo32(chan, 0x0588, 0x00000080);
- nv_wo32(chan, 0x058c, 0x30201000);
- nv_wo32(chan, 0x0590, 0x70605040);
- nv_wo32(chan, 0x0594, 0xb8a89888);
- nv_wo32(chan, 0x0598, 0xf8e8d8c8);
- nv_wo32(chan, 0x05ac, 0xb0000000);
+ nvkm_wo32(image, i, 0x07ff0000);
+ nvkm_wo32(image, 0x054c, 0x4b7fffff);
+ nvkm_wo32(image, 0x0588, 0x00000080);
+ nvkm_wo32(image, 0x058c, 0x30201000);
+ nvkm_wo32(image, 0x0590, 0x70605040);
+ nvkm_wo32(image, 0x0594, 0xb8a89888);
+ nvkm_wo32(image, 0x0598, 0xf8e8d8c8);
+ nvkm_wo32(image, 0x05ac, 0xb0000000);
for (i = 0x0604; i < 0x0644; i += 4)
- nv_wo32(chan, i, 0x00010588);
+ nvkm_wo32(image, i, 0x00010588);
for (i = 0x0644; i < 0x0684; i += 4)
- nv_wo32(chan, i, 0x00030303);
+ nvkm_wo32(image, i, 0x00030303);
for (i = 0x06c4; i < 0x0704; i += 4)
- nv_wo32(chan, i, 0x0008aae4);
+ nvkm_wo32(image, i, 0x0008aae4);
for (i = 0x0704; i < 0x0744; i += 4)
- nv_wo32(chan, i, 0x01012000);
+ nvkm_wo32(image, i, 0x01012000);
for (i = 0x0744; i < 0x0784; i += 4)
- nv_wo32(chan, i, 0x00080008);
- nv_wo32(chan, 0x0860, 0x00040000);
- nv_wo32(chan, 0x0864, 0x00010000);
+ nvkm_wo32(image, i, 0x00080008);
+ nvkm_wo32(image, 0x0860, 0x00040000);
+ nvkm_wo32(image, 0x0864, 0x00010000);
for (i = 0x0868; i < 0x0878; i += 4)
- nv_wo32(chan, i, 0x00040004);
+ nvkm_wo32(image, i, 0x00040004);
for (i = 0x1f1c; i <= 0x308c ; i += 16) {
- nv_wo32(chan, i + 0, 0x10700ff9);
- nv_wo32(chan, i + 4, 0x0436086c);
- nv_wo32(chan, i + 8, 0x000c001b);
+ nvkm_wo32(image, i + 0, 0x10700ff9);
+ nvkm_wo32(image, i + 4, 0x0436086c);
+ nvkm_wo32(image, i + 8, 0x000c001b);
}
for (i = 0x30bc; i < 0x30cc; i += 4)
- nv_wo32(chan, i, 0x0000ffff);
- nv_wo32(chan, 0x3450, 0x3f800000);
- nv_wo32(chan, 0x380c, 0x3f800000);
- nv_wo32(chan, 0x3820, 0x3f800000);
- nv_wo32(chan, 0x384c, 0x40000000);
- nv_wo32(chan, 0x3850, 0x3f800000);
- nv_wo32(chan, 0x3854, 0x3f000000);
- nv_wo32(chan, 0x385c, 0x40000000);
- nv_wo32(chan, 0x3860, 0x3f800000);
- nv_wo32(chan, 0x3868, 0xbf800000);
- nv_wo32(chan, 0x3870, 0xbf800000);
+ nvkm_wo32(image, i, 0x0000ffff);
+ nvkm_wo32(image, 0x3450, 0x3f800000);
+ nvkm_wo32(image, 0x380c, 0x3f800000);
+ nvkm_wo32(image, 0x3820, 0x3f800000);
+ nvkm_wo32(image, 0x384c, 0x40000000);
+ nvkm_wo32(image, 0x3850, 0x3f800000);
+ nvkm_wo32(image, 0x3854, 0x3f000000);
+ nvkm_wo32(image, 0x385c, 0x40000000);
+ nvkm_wo32(image, 0x3860, 0x3f800000);
+ nvkm_wo32(image, 0x3868, 0xbf800000);
+ nvkm_wo32(image, 0x3870, 0xbf800000);
+ nvkm_done(image);
return 0;
}
if (ret)
return ret;
- nv_wo32(obj, 0x00, nv_mclass(obj));
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
+ nvkm_kmap(obj);
+ nvkm_wo32(obj, 0x00, nv_mclass(obj));
+ nvkm_wo32(obj, 0x04, 0x00000000);
+ nvkm_wo32(obj, 0x08, 0x00000000);
#ifdef __BIG_ENDIAN
- nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
+ nvkm_mo32(obj, 0x08, 0x01000000, 0x01000000);
#endif
- nv_wo32(obj, 0x0c, 0x00000000);
- nv_wo32(obj, 0x10, 0x00000000);
+ nvkm_wo32(obj, 0x0c, 0x00000000);
+ nvkm_wo32(obj, 0x10, 0x00000000);
+ nvkm_done(obj);
return 0;
}
return ret;
nv40_grctx_fill(nv_device(gr), nv_gpuobj(chan));
- nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
+ nvkm_kmap(&chan->base.base.gpuobj);
+ nvkm_wo32(&chan->base.base.gpuobj, 0x00000, nv_gpuobj(chan)->addr >> 4);
+ nvkm_done(&chan->base.base.gpuobj);
return 0;
}
if (ret)
return ret;
- nv_wo32(obj, 0x00, nv_mclass(obj));
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
- nv_wo32(obj, 0x0c, 0x00000000);
+ nvkm_kmap(obj);
+ nvkm_wo32(obj, 0x00, nv_mclass(obj));
+ nvkm_wo32(obj, 0x04, 0x00000000);
+ nvkm_wo32(obj, 0x08, 0x00000000);
+ nvkm_wo32(obj, 0x0c, 0x00000000);
+ nvkm_done(obj);
return 0;
}