/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "nouveau_drv.h"
#include <drm/nouveau_drm.h>
#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
#include "nouveau_software.h"
#include "nouveau_vm.h"

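/* Software-method dispatch tables.  Every object class registered with
 * the driver gets a nouveau_gpuobj_class entry, which in turn carries a
 * list of (method, handler) pairs used to emulate object methods in
 * software.
 */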
struct nouveau_gpuobj_method {
	struct list_head head;
	u32 mthd;
	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
	struct list_head head;
	struct list_head methods;
	u32 id;
	u32 engine;
};

int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_class *oc;

	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
	if (!oc)
		return -ENOMEM;

	INIT_LIST_HEAD(&oc->methods);
	oc->id = class;
	oc->engine = engine;
	list_add(&oc->head, &dev_priv->classes);
	return 0;
}

int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
			int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	return -EINVAL;

found:
	om = kzalloc(sizeof(*om), GFP_KERNEL);
	if (!om)
		return -ENOMEM;

	om->mthd = mthd;
	om->exec = exec;
	list_add(&om->head, &oc->methods);
	return 0;
}

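/* Look up the handler registered for (class, mthd) on this channel's
 * device and invoke it; returns an error when no handler is registered.
 */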
int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
			 u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id != class)
			continue;

		list_for_each_entry(om, &oc->methods, head) {
			if (om->mthd == mthd)
				return om->exec(chan, class, mthd, data);
		}
	}

	return -ENOENT;
}

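/* As nouveau_gpuobj_mthd_call(), but resolves the channel from its FIFO
 * channel id, holding the channel lock across the call.
 */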
int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (chid >= 0 && chid < pfifo->channels)
		chan = dev_priv->channels.ptr[chid];
	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return ret;
}

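/* Allocate instance memory for a new gpuobj.  Per-channel objects are
 * sub-allocated from the channel's PRAMIN heap; global objects (and
 * NVOBJ_FLAG_VM allocations) come from the instmem engine directly, in
 * which case cinst is set to NVOBJ_CINST_GLOBAL.
 */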
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (!(flags & NVOBJ_FLAG_VM) && chan) {
		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);
		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}

		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += ramin->start;

		gpuobj->cinst = ramin->start;
		gpuobj->vinst = ramin->start + chan->ramin->vinst;
		gpuobj->node  = ramin;
	} else {
		ret = instmem->get(gpuobj, chan, size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		ret = -ENOSYS;
		if (!(flags & NVOBJ_FLAG_DONT_MAP))
			ret = instmem->map(gpuobj);
		if (ret)
			gpuobj->pinst = ~0;

		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	INIT_LIST_HEAD(&dev_priv->classes);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om, *tm;
	struct nouveau_gpuobj_class *oc, *tc;

	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
		list_for_each_entry_safe(om, tm, &oc->methods, head) {
			list_del(&om->head);
			kfree(om);
		}
		list_del(&oc->head);
		kfree(oc);
	}

	WARN_ON(!list_empty(&dev_priv->gpuobj_list));
}

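/* kref release handler: zeroes the object if NVOBJ_FLAG_ZERO_FREE is
 * set, runs any destructor, then returns the backing storage to the
 * channel's PRAMIN heap or the instmem engine and unlinks the object
 * from gpuobj_list.
 */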
static void
nouveau_gpuobj_del(struct kref *ref)
{
	struct nouveau_gpuobj *gpuobj =
		container_of(ref, struct nouveau_gpuobj, refcount);
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
		if (gpuobj->node) {
			instmem->unmap(gpuobj);
			instmem->put(gpuobj);
		}
	} else {
		if (gpuobj->node) {
			spin_lock(&dev_priv->ramin_lock);
			drm_mm_put_block(gpuobj->node);
			spin_unlock(&dev_priv->ramin_lock);
		}
	}

	spin_lock(&dev_priv->ramin_lock);
	list_del(&gpuobj->list);
	spin_unlock(&dev_priv->ramin_lock);

	kfree(gpuobj);
}

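/* Update *ptr to reference ref, dropping any reference previously held
 * there.  nouveau_gpuobj_ref(NULL, &obj) is the idiom used throughout
 * this file to release an object.
 */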
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}

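/* Wrap an already-allocated range of instance memory (given by its
 * PRAMIN offset and/or VRAM address) in a gpuobj, without allocating
 * any backing storage of its own.
 */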
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size  = size;
	gpuobj->pinst = pinst;
	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);
	*pgpuobj = gpuobj;
	return 0;
}

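/* Fill in a 24-byte NV50 DMA object at 'offset' inside obj.  The layout
 * written below is six 32-bit words: flags/class, limit low, base low,
 * combined base/limit high bits, and two zero words.  The 'size'
 * parameter is converted to an inclusive limit before being written.
 */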
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
		     u64 base, u64 size, int target, int access,
		     u32 type, u32 comp)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	u32 flags0;

	flags0  = (comp << 29) | (type << 22) | class;
	flags0 |= 0x00100000;

	switch (access) {
	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
	case NV_MEM_ACCESS_RW:
	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
	default:
		break;
	}

	switch (target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00010000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	case NV_MEM_TARGET_GART:
		base += dev_priv->gart_info.aper_base;
		/* fall through */
	default:
		flags0 &= ~0x00100000;
		break;
	}

	/* convert to base + limit */
	size = (base + size) - 1;

	nv_wo32(obj, offset + 0x00, flags0);
	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
				    upper_32_bits(base));
	nv_wo32(obj, offset + 0x10, 0x00000000);
	nv_wo32(obj, offset + 0x14, 0x00000000);

	pinstmem->flush(obj->dev);
}

int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
		    u64 size, int target, int access, u32 type, u32 comp,
		    struct nouveau_gpuobj **pobj)
{
	struct drm_device *dev = chan->dev;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
	if (ret)
		return ret;

	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
			     access, type, comp);
	return 0;
}

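/* Card-independent entry point for DMA object creation.  NV50 and up
 * take the path above; earlier cards build the two-word ctxdma format
 * below, with GART targets remapped to PCI addresses where the GART is
 * not itself a paged object.
 */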
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
		       u64 size, int access, int target,
		       struct nouveau_gpuobj **pobj)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj;
	u32 flags0, flags2;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

		return nv50_gpuobj_dma_new(chan, class, base, size,
					   target, access, type, comp, pobj);
	}

	if (target == NV_MEM_TARGET_GART) {
		struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;

		if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
			if (base == 0) {
				nouveau_gpuobj_ref(gart, pobj);
				return 0;
			}

			base = nouveau_sgdma_get_physical(dev, base);
			target = NV_MEM_TARGET_PCI;
		} else {
			base += dev_priv->gart_info.aper_base;
			if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
				target = NV_MEM_TARGET_PCI_NOSNOOP;
			else
				target = NV_MEM_TARGET_PCI;
		}
	}

	flags0  = class;
	flags0 |= 0x00003000; /* PT present, PT linear */
	flags2  = 0;

	switch (target) {
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	default:
		break;
	}

	switch (access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
		/* fall through */
	default:
		flags2 |= 0x00000002;
		break;
	}

	flags0 |= (base & 0x00000fff) << 20;
	flags2 |= (base & 0xfffff000);

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, flags0);
	nv_wo32(obj, 0x04, size - 1);
	nv_wo32(obj, 0x08, flags2);
	nv_wo32(obj, 0x0c, flags2);

	obj->engine = NVOBJ_ENGINE_SW;
	*pobj = obj;
	return 0;
}

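/* Instantiate an engine object of the given class on a channel,
 * creating the owning engine's channel context first if it does not
 * exist yet, then asking the engine to create the named object.
 */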
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	list_for_each_entry(oc, &dev_priv->classes, head) {
		struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];

		if (oc->id != class)
			continue;

		if (!chan->engctx[oc->engine]) {
			ret = eng->context_new(chan, oc->engine);
			if (ret)
				return ret;
		}

		return eng->object_new(chan, oc->engine, handle, class);
	}

	return -EINVAL;
}

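/* Carve out the per-channel PRAMIN area that nouveau_gpuobj_new()
 * sub-allocates from; NV50 reserves extra room at fixed offsets for
 * the page directory, RAMHT and related tables.
 */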
static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x2000;
	base = 0;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size - base);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}

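/* NVC0 (Fermi) channels only need an instance block pointing at the
 * VM's page directory; there is no RAMHT or ctxdma setup here.
 */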
static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *pgd = NULL;
	struct nouveau_vm_pgd *vpgd;
	int ret;

	ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
	if (ret)
		return ret;

	/* create page directory for this vm if none currently exists,
	 * will be destroyed automagically when last reference to the
	 * vm is removed
	 */
	if (list_empty(&vm->pgd_list)) {
		ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
		if (ret)
			return ret;
	}
	nouveau_vm_ref(vm, &chan->vm, pgd);
	nouveau_gpuobj_ref(NULL, &pgd);

	/* point channel at vm's page directory */
	vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
	nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0208, 0xffffffff);
	nv_wo32(chan->ramin, 0x020c, 0x000000ff);

	return 0;
}

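/* Per-channel object setup: channel PRAMIN, the NV50 page directory,
 * RAMHT, and the VRAM/GART ctxdmas inserted under vram_h and tt_h.
 */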
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
	struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
	if (dev_priv->card_type >= NV_C0)
		return nvc0_gpuobj_channel_init(chan, vm);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Link with shared channel VM
	 */
	if (vm) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;

		nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &vram);
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &vram);
	}
	if (ret) {
		NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &tt);
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_GART, &tt);
	}
	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	NV_DEBUG(chan->dev, "ch%d\n", chan->id);

	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_pd);

	if (drm_mm_initialized(&chan->ramin_heap))
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}

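/* Save/restore of global instance memory across suspend: each global
 * gpuobj's contents are copied into a vmalloc'd shadow buffer here and
 * written back by nouveau_gpuobj_resume().
 */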
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
			continue;

		gpuobj->suspend = vmalloc(gpuobj->size);
		if (!gpuobj->suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

		vfree(gpuobj->suspend);
		gpuobj->suspend = NULL;
	}

	dev_priv->engine.instmem.flush(dev);
}

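/* Accessors for instance memory.  When an object has no BAR mapping
 * (pinst == ~0) or PRAMIN isn't available yet, these go through what
 * appears to be a 64KiB indirect window at 0x700000, repositioned by
 * writing the 64KiB-aligned target address to register 0x001700.
 */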
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32  val;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}