/* drivers/gpu/drm/nouveau/nv50_evo.c */

/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <drm/drmP.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nv50_display.h"

#include <core/gpuobj.h>

#include <subdev/timer.h>
#include <subdev/fb.h>

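/* Register accessors for an EVO channel's USER control area.  The
 * ioremap()ed mapping of that area is stashed in the ofuncs->rd08 slot by
 * nv50_evo_channel_new() below, so these helpers simply recover the pointer
 * and perform 32-bit MMIO at the given offset.
 */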
static u32
nv50_evo_rd32(struct nouveau_object *object, u32 addr)
{
	void __iomem *iomem = object->oclass->ofuncs->rd08;
	return ioread32_native(iomem + addr);
}

static void
nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
	void __iomem *iomem = object->oclass->ofuncs->rd08;
	iowrite32_native(data, iomem + addr);
}

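/* Tear down a software EVO channel: release the push buffer, undo the
 * USER-area mapping stashed in ofuncs->rd08, and free the wrapper.
 */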
static void
nv50_evo_channel_del(struct nouveau_channel **pevo)
{
	struct nouveau_channel *evo = *pevo;

	if (!evo)
		return;
	*pevo = NULL;

	nouveau_bo_unmap(evo->push.buffer);
	nouveau_bo_ref(NULL, &evo->push.buffer);

	if (evo->object)
		iounmap(evo->object->oclass->ofuncs->rd08);

	kfree(evo);
}

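/* Write a DMA object covering [base, base + size) into the display's private
 * instance memory, along with a hash-table entry binding it to 'handle' on
 * this channel.  'memtype' is folded into the object flags (and forced to
 * zero on the original nv50, where the memory type is given through the
 * format method instead).
 */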
int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
		    u64 base, u64 size, struct nouveau_gpuobj **pobj)
{
	struct drm_device *dev = evo->fence;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_display *disp = nv50_display(dev);
	u32 dmao = disp->dmao;
	u32 hash = disp->hash;
	u32 flags5;

	if (nv_device(drm->device)->chipset < 0xc0) {
		/* not supported on 0x50, specified in format mthd */
		if (nv_device(drm->device)->chipset == 0x50)
			memtype = 0;
		flags5 = 0x00010000;
	} else {
		if (memtype & 0x80000000)
			flags5 = 0x00000000; /* large pages */
		else
			flags5 = 0x00020000;
	}

	nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
	nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
	nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
	nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
					  upper_32_bits(base));
	nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
	nv_wo32(disp->ramin, dmao + 0x14, flags5);

	nv_wo32(disp->ramin, hash + 0x00, handle);
	nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
					   evo->handle);

	disp->dmao += 0x20;
	disp->hash += 0x08;
	return 0;
}

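/* Construct a software-only EVO channel: allocate the wrapper, pin and map a
 * 4KiB push buffer in VRAM, and fake up just enough of a nouveau_object
 * (rd32/wr32 pointing at the accessors above, the USER-area mapping stashed
 * in rd08) for the generic pushbuf macros to drive it like a normal channel.
 */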
static int
nv50_evo_channel_new(struct drm_device *dev, int chid,
		     struct nouveau_channel **pevo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_display *disp = nv50_display(dev);
	struct nouveau_channel *evo;
	int ret;

	evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
	if (!evo)
		return -ENOMEM;
	*pevo = evo;

	evo->drm = drm;
	evo->handle = chid;
	evo->fence = dev;
	evo->user_get = 4;
	evo->user_put = 0;

	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
			     &evo->push.buffer);
	if (ret == 0)
		ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pevo);
		return ret;
	}

	ret = nouveau_bo_map(evo->push.buffer);
	if (ret) {
		NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pevo);
		return ret;
	}

	evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
#ifdef NOUVEAU_OBJECT_MAGIC
	evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
#endif
	evo->object->parent = nv_object(disp->ramin)->parent;
	evo->object->engine = nv_object(disp->ramin)->engine;
	evo->object->oclass =
		kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
	evo->object->oclass->ofuncs =
		kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
	evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
	evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
	evo->object->oclass->ofuncs->rd08 =
		ioremap(pci_resource_start(dev->pdev, 0) +
			NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
	return 0;
}

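/* Bring the hardware channel up: point it at the push buffer, enable its
 * DMA engine and wait for it to leave the init state, then enable error
 * reporting and prime the software ring-tracking state with the usual
 * NOUVEAU_DMA_SKIPS padding entries.
 */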
static int
nv50_evo_channel_init(struct nouveau_channel *evo)
{
	struct nouveau_drm *drm = evo->drm;
	struct nouveau_device *device = nv_device(drm->device);
	int id = evo->handle, ret, i;
	u64 pushbuf = evo->push.buffer->bo.offset;
	u32 tmp;

	tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
	if ((tmp & 0x009f0000) == 0x00020000)
		nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);

	tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
	if ((tmp & 0x003f0000) == 0x00030000)
		nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);

	/* initialise fifo */
	nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
		NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
		NV50_PDISPLAY_EVO_DMA_CB_VALID);
	nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
	nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
	nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
		NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);

	nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
	nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
		NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
	if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
		NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id,
			 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
		return -EBUSY;
	}

	/* enable error reporting on the channel */
	nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id);

	evo->dma.max = (4096/4) - 2;
	evo->dma.max &= ~7;
	evo->dma.put = 0;
	evo->dma.cur = evo->dma.put;
	evo->dma.free = evo->dma.max - evo->dma.cur;

	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(evo, 0);

	return 0;
}

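/* Quiesce a hardware channel: mask its error interrupt, ack anything
 * pending, disable DMA and wait for the channel to go idle.
 */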
static void
nv50_evo_channel_fini(struct nouveau_channel *evo)
{
	struct nouveau_drm *drm = evo->drm;
	struct nouveau_device *device = nv_device(drm->device);
	int id = evo->handle;

	nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000);
	nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
	nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << id));
	nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
	if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
		NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id,
			 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
	}
}

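/* Free everything created by nv50_evo_create(): per-CRTC semaphore buffers
 * and sync channels, the master channel, and the shared instance memory.
 */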
void
nv50_evo_destroy(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	int i;

	for (i = 0; i < 2; i++) {
		if (disp->crtc[i].sem.bo) {
			nouveau_bo_unmap(disp->crtc[i].sem.bo);
			nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
		}
		nv50_evo_channel_del(&disp->crtc[i].sync);
	}
	nv50_evo_channel_del(&disp->master);
	nouveau_gpuobj_ref(NULL, &disp->ramin);
}

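/* Create the EVO channels used for modesetting: a shared block of instance
 * memory to hold DMA objects, the master channel, and one "display sync"
 * channel per CRTC (each with a small semaphore buffer) for page flipping.
 */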
int
nv50_evo_create(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nv50_display *disp = nv50_display(dev);
	struct nouveau_channel *evo;
	int ret, i, j;

	/* setup object management, shared by every evo channel as there's
	 * no per-channel support on the hardware
	 */
	ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536,
				 NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
	if (ret) {
		NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret);
		goto err;
	}

	disp->hash = 0x0000;
	disp->dmao = 0x1000;

	/* create primary evo channel, the one we use for modesetting
	 * purposes
	 */
	ret = nv50_evo_channel_new(dev, 0, &disp->master);
	if (ret)
		return ret;
	evo = disp->master;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
				  disp->ramin->addr + 0x2000, 0x1000, NULL);
	if (ret)
		goto err;

	/* create some default objects for the scanout memtypes we support */
	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
				  0, pfb->ram.size, NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
				  0, pfb->ram.size, NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
				  (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe),
				  0, pfb->ram.size, NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
				  (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe),
				  0, pfb->ram.size, NULL);
	if (ret)
		goto err;

	/* create "display sync" channels and other structures we need
	 * to implement page flipping
	 */
	for (i = 0; i < 2; i++) {
		struct nv50_display_crtc *dispc = &disp->crtc[i];
		u64 offset;

		ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
		if (ret)
			goto err;

		ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
				     0, 0x0000, NULL, &dispc->sem.bo);
		if (!ret) {
			ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
			if (!ret)
				ret = nouveau_bo_map(dispc->sem.bo);
			if (ret)
				nouveau_bo_ref(NULL, &dispc->sem.bo);
			else
				offset = dispc->sem.bo->bo.offset;
		}

		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
					  offset, 4096, NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
					  0, pfb->ram.size, NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
					  (nv_device(drm->device)->chipset < 0xc0 ?
					   0x7a : 0xfe),
					  0, pfb->ram.size, NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
					  (nv_device(drm->device)->chipset < 0xc0 ?
					   0x70 : 0xfe),
					  0, pfb->ram.size, NULL);
		if (ret)
			goto err;

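		/* prime the semaphore buffer with 0x74b1e000, which the
		 * page-flip code appears to treat as its "flip completed"
		 * marker, so the first flip has nothing to wait for
		 */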
		for (j = 0; j < 4096; j += 4)
			nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
		dispc->sem.offset = 0;
	}

	return 0;

err:
	nv50_evo_destroy(dev);
	return ret;
}

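/* Bring up the master channel and both per-CRTC sync channels, bailing out
 * on the first failure.
 */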
int
nv50_evo_init(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	int ret, i;

	ret = nv50_evo_channel_init(disp->master);
	if (ret)
		return ret;

	for (i = 0; i < 2; i++) {
		ret = nv50_evo_channel_init(disp->crtc[i].sync);
		if (ret)
			return ret;
	}

	return 0;
}

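/* Shut down whichever channels exist; the NULL checks let this run safely
 * even if creation only got part-way.
 */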
void
nv50_evo_fini(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	int i;

	for (i = 0; i < 2; i++) {
		if (disp->crtc[i].sync)
			nv50_evo_channel_fini(disp->crtc[i].sync);
	}

	if (disp->master)
		nv50_evo_channel_fini(disp->master);
}