int i;
if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
+ nvkm_kmap(gpuobj);
for (i = 0; i < gpuobj->size; i += 4)
- nv_wo32(gpuobj, i, 0x00000000);
+ nvkm_wo32(gpuobj, i, 0x00000000);
+ nvkm_done(gpuobj);
}
if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
+ nvkm_kmap(gpuobj);
for (i = 0; i < gpuobj->size; i += 4)
- nv_wo32(gpuobj, i, 0x00000000);
+ nvkm_wo32(gpuobj, i, 0x00000000);
+ nvkm_done(gpuobj);
}
return ret;
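For reference, the pattern the patch introduces in both hunks above is: nvkm_kmap() to make the object accessible from the CPU, nvkm_wo32() for each 32-bit store, and nvkm_done() to end the access. A minimal sketch of that pattern, not part of the patch itself; the helper name is hypothetical and it assumes the usual nvkm core headers are in scope:

/*
 * Illustrative sketch only, not from the patch: zero-fill a gpuobj
 * using the bracketed accessors introduced above.
 */
static void
example_gpuobj_zero(struct nvkm_gpuobj *gpuobj)
{
	int i;

	nvkm_kmap(gpuobj);			/* map the object for CPU access */
	for (i = 0; i < gpuobj->size; i += 4)	/* size in bytes, 32-bit stores */
		nvkm_wo32(gpuobj, i, 0x00000000);
	nvkm_done(gpuobj);			/* finish the CPU access */
}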
int
nvkm_ramht_insert(struct nvkm_ramht *ramht, int chid, u32 handle, u32 context)
{
+ struct nvkm_gpuobj *gpuobj = &ramht->gpuobj;
struct nvkm_bar *bar = nvkm_bar(ramht);
+ int ret = -ENOSPC;
u32 co, ho;
co = ho = nvkm_ramht_hash(ramht, chid, handle);
+ nvkm_kmap(gpuobj);
do {
- if (!nv_ro32(ramht, co + 4)) {
- nv_wo32(ramht, co + 0, handle);
- nv_wo32(ramht, co + 4, context);
+ if (!nvkm_ro32(gpuobj, co + 4)) {
+ nvkm_wo32(gpuobj, co + 0, handle);
+ nvkm_wo32(gpuobj, co + 4, context);
if (bar)
bar->flush(bar);
- return co;
+ ret = co;
+ break;
}
co += 8;
if (co >= nv_gpuobj(ramht)->size)
co = 0;
} while (co != ho);
+ nvkm_done(gpuobj);
- return -ENOMEM;
+ return ret;
}
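On the caller side, nvkm_ramht_insert() still returns the byte offset of the claimed slot on success; with this change a full table now reports -ENOSPC (previously -ENOMEM) through the single exit path after nvkm_done(). A hedged usage sketch, not taken from the patch; the chid, handle and context values are placeholders supplied by the surrounding channel code:

/* Illustrative caller sketch only, not from the patch. */
int hash = nvkm_ramht_insert(ramht, chid, handle, context);
if (hash < 0)
	return hash;	/* -ENOSPC: no free slot left in the hash table */

/* ... later, drop the entry again using the returned slot offset ... */
nvkm_ramht_remove(ramht, hash);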
void
nvkm_ramht_remove(struct nvkm_ramht *ramht, int cookie)
{
+ struct nvkm_gpuobj *gpuobj = &ramht->gpuobj;
struct nvkm_bar *bar = nvkm_bar(ramht);
- nv_wo32(ramht, cookie + 0, 0x00000000);
- nv_wo32(ramht, cookie + 4, 0x00000000);
+ nvkm_kmap(gpuobj);
+ nvkm_wo32(gpuobj, cookie + 0, 0x00000000);
+ nvkm_wo32(gpuobj, cookie + 4, 0x00000000);
if (bar)
bar->flush(bar);
+ nvkm_done(gpuobj);
}
static struct nvkm_oclass