/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"

struct nouveau_gpuobj_method {
	struct list_head head;
	u32 mthd;
	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
	struct list_head head;
	struct list_head methods;
	u32 id;
	u32 engine;
};

int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_class *oc;

	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
	if (!oc)
		return -ENOMEM;

	INIT_LIST_HEAD(&oc->methods);
	oc->id = class;
	oc->engine = engine;
	list_add(&oc->head, &dev_priv->classes);
	return 0;
}

int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
			int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	return -EINVAL;

found:
	om = kzalloc(sizeof(*om), GFP_KERNEL);
	if (!om)
		return -ENOMEM;

	om->mthd = mthd;
	om->exec = exec;
	list_add(&om->head, &oc->methods);
	return 0;
}

int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
			 u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id != class)
			continue;

		list_for_each_entry(om, &oc->methods, head) {
			if (om->mthd == mthd)
				return om->exec(chan, class, mthd, data);
		}
	}

	return -ENOENT;
}

int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (chid > 0 && chid < dev_priv->engine.fifo.channels)
		chan = dev_priv->channels.ptr[chid];
	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return ret;
}

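/* Illustrative sketch only (not part of the driver): how an engine's init
 * code would register a class and one software method using the helpers
 * above.  The class id (0x006e), method offset (0x0150) and the handler
 * pointer are hypothetical examples.  A PFIFO software-method trap would
 * then typically be dispatched via nouveau_gpuobj_mthd_call2(dev, chid,
 * class, mthd, data) with the channel id taken from the interrupt.
 */
static inline int
example_register_sw_class(struct drm_device *dev,
			  int (*handler)(struct nouveau_channel *,
					 u32, u32, u32))
{
	int ret;

	ret = nouveau_gpuobj_class_new(dev, 0x006e, NVOBJ_ENGINE_SW);
	if (ret)
		return ret;

	/* one handler is registered per (class, method) pair */
	return nouveau_gpuobj_mthd_new(dev, 0x006e, 0x0150, handler);
}
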
/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32. The first CARD32 contains
   the handle, the second one a bitfield that contains the address of the
   object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0 instance_addr >> 4
   17:16 engine (here uses 1 = graphics)
   28:24 channel id (here uses 0)
   31    valid (use 1)

   NV40:

   15: 0 instance_addr >> 4   (maybe 19-0)
   21:20 engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id;
   the hash function itself lives in nouveau_ramht.c.
*/
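
/* Illustrative sketch only (not used by the driver): packing the second
 * hash-table CARD32 for an NV4-NV30 style entry from the fields described
 * above.  The helper name and values are hypothetical; the real packing is
 * done by nouveau_ramht_insert() in nouveau_ramht.c, which this file calls
 * from nouveau_gpuobj_gr_new().  For example, an object at instance
 * address 0x2000 on the graphics engine of channel 0 packs to 0x80010200.
 */
static inline u32
example_nv04_ramht_ctx(u32 inst_addr, u32 chid)
{
	return (inst_addr >> 4) |	/* 15:0  instance_addr >> 4 */
	       (1 << 16) |		/* 17:16 engine, 1 = graphics */
	       (chid << 24) |		/* 28:24 channel id */
	       (1u << 31);		/* 31    valid */
}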

int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (chan) {
		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);
		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}

		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += ramin->start;

		gpuobj->cinst = ramin->start;
		gpuobj->vinst = ramin->start + chan->ramin->vinst;
		gpuobj->node  = ramin;
	} else {
		ret = instmem->get(gpuobj, size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		ret = -ENOSYS;
		if (!(flags & NVOBJ_FLAG_DONT_MAP))
			ret = instmem->map(gpuobj);
		if (ret)
			gpuobj->pinst = ~0;

		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	INIT_LIST_HEAD(&dev_priv->classes);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om, *tm;
	struct nouveau_gpuobj_class *oc, *tc;

	NV_DEBUG(dev, "\n");

	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
		list_for_each_entry_safe(om, tm, &oc->methods, head) {
			list_del(&om->head);
			kfree(om);
		}
		list_del(&oc->head);
		kfree(oc);
	}

	BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}

static void
nouveau_gpuobj_del(struct kref *ref)
{
	struct nouveau_gpuobj *gpuobj =
		container_of(ref, struct nouveau_gpuobj, refcount);
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
		if (gpuobj->node) {
			instmem->unmap(gpuobj);
			instmem->put(gpuobj);
		}
	} else {
		if (gpuobj->node) {
			spin_lock(&dev_priv->ramin_lock);
			drm_mm_put_block(gpuobj->node);
			spin_unlock(&dev_priv->ramin_lock);
		}
	}

	spin_lock(&dev_priv->ramin_lock);
	list_del(&gpuobj->list);
	spin_unlock(&dev_priv->ramin_lock);

	kfree(gpuobj);
}

void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}

int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size  = size;
	gpuobj->pinst = pinst;
	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);
	*pgpuobj = gpuobj;
	return 0;
}

static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 readonly, 1 readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator, same value as the first pte (which is what
   nvidia does); rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The function below creates a DMA object in instance RAM and returns a
   handle to it that can be used to set up context objects.
*/
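
/* Worked example (illustrative, hypothetical values): a read-write
 * NV_CLASS_DMA_IN_MEMORY (0x003d) object covering 0x10000 bytes of PCI
 * memory at bus address 0x12345678 would be packed by
 * nouveau_gpuobj_dma_new() further below as
 *
 *	entry[0] = 0x003d | 0x00003000 (PT present + linear)
 *		 | 0x00020000 (target PCI) | (0x678 << 20) (dma adjust)
 *		 = 0x6782303d
 *	entry[1] = 0x0000ffff (dma limit = size - 1)
 *	entry[2] = entry[3] = 0x12345000 | 0x00000002 (page address | rw)
 *
 * NV50 and later instead use the 24-byte layout written by
 * nv50_gpuobj_dma_init() below.
 */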

void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
		     u64 base, u64 size, int target, int access,
		     u32 type, u32 comp)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	u32 flags0;

	flags0  = (comp << 29) | (type << 22) | class;
	flags0 |= 0x00100000;

	switch (access) {
	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
	case NV_MEM_ACCESS_RW:
	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
	default:
		break;
	}

	switch (target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00010000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	case NV_MEM_TARGET_GART:
		base += dev_priv->gart_info.aper_base;
	default:
		flags0 &= ~0x00100000;
		break;
	}

	/* convert to base + limit */
	size = (base + size) - 1;

	nv_wo32(obj, offset + 0x00, flags0);
	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
				    upper_32_bits(base));
	nv_wo32(obj, offset + 0x10, 0x00000000);
	nv_wo32(obj, offset + 0x14, 0x00000000);

	pinstmem->flush(obj->dev);
}

int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
		    int target, int access, u32 type, u32 comp,
		    struct nouveau_gpuobj **pobj)
{
	struct drm_device *dev = chan->dev;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
	if (ret)
		return ret;

	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
			     access, type, comp);
	return 0;
}

int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
		       u64 size, int access, int target,
		       struct nouveau_gpuobj **pobj)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj;
	u32 flags0, flags2;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

		return nv50_gpuobj_dma_new(chan, class, base, size,
					   target, access, type, comp, pobj);
	}

	if (target == NV_MEM_TARGET_GART) {
		struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;

		if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
			if (base == 0) {
				nouveau_gpuobj_ref(gart, pobj);
				return 0;
			}

			base = nouveau_sgdma_get_physical(dev, base);
			target = NV_MEM_TARGET_PCI;
		} else {
			base += dev_priv->gart_info.aper_base;
			if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
				target = NV_MEM_TARGET_PCI_NOSNOOP;
			else
				target = NV_MEM_TARGET_PCI;
		}
	}

	flags0  = class;
	flags0 |= 0x00003000; /* PT present, PT linear */
	flags2  = 0;

	switch (target) {
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	default:
		break;
	}

	switch (access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
	default:
		flags2 |= 0x00000002;
		break;
	}

	flags0 |= (base & 0x00000fff) << 20;
	flags2 |= (base & 0xfffff000);

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, flags0);
	nv_wo32(obj, 0x04, size - 1);
	nv_wo32(obj, 0x08, flags2);
	nv_wo32(obj, 0x0c, flags2);

	obj->engine = NVOBJ_ENGINE_SW;
	obj->class  = class;
	*pobj = obj;
	return 0;
}

/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0 class
   12   chroma key enable
   13   user clip enable
   14   swizzle enable
   17:15 patch config:
       scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18   synchronize enable
   19   endian: 1 big, 0 little
   21:20 dither mode
   23    single step enable
   24    patch status: 0 invalid, 1 valid
   25    context_surface 0: 1 valid
   26    context surface 1: 1 valid
   27    context pattern: 1 valid
   28    context rop: 1 valid
   29,30 context beta, beta4
   entry[1]
   7:0   mono format
   15:8  color format
   31:16 notify instance address
   entry[2]
   15:0  dma 0 instance address
   31:16 dma 1 instance address
   entry[3]
   dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
   11:0  class  (maybe uses more bits here?)
   17    user clip enable
   21:19 patch config
   25    patch status valid ?
   entry[1]:
   15:0  DMA notifier  (maybe 20:0)
   entry[2]:
   15:0  DMA 0 instance (maybe 20:0)
   24    big endian
   entry[3]:
   15:0  DMA 1 instance (maybe 20:0)
   entry[4]:
   entry[5]:
   set to 0?
*/
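
/* Worked example (illustrative, class id hypothetical): for a class 0x005f
 * object, nouveau_gpuobj_gr_new() below writes entry[0] = 0x0000005f on
 * NV40 (plus 0x01000000 at byte offset 8 on big-endian builds), sets bit
 * 19 of entry[0] instead on pre-NV40 big-endian builds, and on NV50+
 * writes the class at offset 0x00 and 0x00010000 at offset 0x14; every
 * other word stays zero thanks to NVOBJ_FLAG_ZERO_ALLOC.
 */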
static int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj *gpuobj;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->dev = chan->dev;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;
	kref_init(&gpuobj->refcount);
	gpuobj->cinst = 0x40;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);
	*gpuobj_ret = gpuobj;
	return 0;
}

int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
	return -EINVAL;

found:
	switch (oc->engine) {
	case NVOBJ_ENGINE_SW:
		if (dev_priv->card_type < NV_C0) {
			ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
			if (ret)
				return ret;
			goto insert;
		}
		break;
	case NVOBJ_ENGINE_GR:
		if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) ||
		    (dev_priv->card_type < NV_20 && !chan->pgraph_ctx)) {
			struct nouveau_pgraph_engine *pgraph =
				&dev_priv->engine.graph;

			ret = pgraph->create_context(chan);
			if (ret)
				return ret;
		}
		break;
	case NVOBJ_ENGINE_CRYPT:
		if (!chan->crypt_ctx) {
			struct nouveau_crypt_engine *pcrypt =
				&dev_priv->engine.crypt;

			ret = pcrypt->create_context(chan);
			if (ret)
				return ret;
		}
		break;
	}

	/* we're done if this is fermi */
	if (dev_priv->card_type >= NV_C0)
		return 0;

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 &gpuobj);
	if (ret) {
		NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type >= NV_50) {
		nv_wo32(gpuobj,  0, class);
		nv_wo32(gpuobj, 20, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(gpuobj, 0, 0x00001030);
			nv_wo32(gpuobj, 4, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(gpuobj, 8, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.flush(dev);

	gpuobj->engine = oc->engine;
	gpuobj->class  = oc->id;

insert:
	ret = nouveau_ramht_insert(chan, handle, gpuobj);
	if (ret)
		NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
	nouveau_gpuobj_ref(NULL, &gpuobj);
	return ret;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x2000;
	base = 0;

	/* PGRAPH context */
	size += dev_priv->engine.graph.grctx_size;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	if (dev_priv->card_type == NV_C0) {
		struct nouveau_vm *vm = dev_priv->chan_vm;
		struct nouveau_vm_pgd *vpgd;

		ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
					 &chan->ramin);
		if (ret)
			return ret;

		nouveau_vm_ref(vm, &chan->vm, NULL);

		vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
		nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
		nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
		nv_wo32(chan->ramin, 0x0208, 0xffffffff);
		nv_wo32(chan->ramin, 0x020c, 0x000000ff);
		return 0;
	}

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Link with shared channel VM
	 */
	if (dev_priv->chan_vm) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;

		nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &tt);
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_GART, &tt);
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	nouveau_ramht_ref(NULL, &chan->ramht, chan);

	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_pd);

	if (drm_mm_initialized(&chan->ramin_heap))
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}

int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
			continue;

		gpuobj->suspend = vmalloc(gpuobj->size);
		if (!gpuobj->suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

		vfree(gpuobj->suspend);
		gpuobj->suspend = NULL;
	}

	dev_priv->engine.instmem.flush(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (init->handle == ~0)
		return -EINVAL;

	chan = nouveau_channel_get(dev, file_priv, init->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (nouveau_ramht_find(chan, init->handle)) {
		ret = -EEXIST;
		goto out;
	}

	ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
	}

out:
	nouveau_channel_put(&chan);
	return ret;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_channel *chan;
	int ret;

	chan = nouveau_channel_get(dev, file_priv, objfree->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Synchronize with the user channel */
	nouveau_channel_idle(chan);

	ret = nouveau_ramht_remove(chan, objfree->handle);
	nouveau_channel_put(&chan);
	return ret;
}

u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32  val;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock(&dev_priv->ramin_lock);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock(&dev_priv->ramin_lock);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock(&dev_priv->ramin_lock);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}