drm/nv50/graph: remove ability to do interrupt-driven context switching
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / gpu / drm / nouveau / nv50_fifo.c
CommitLineData
6ee73861
BS
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
a8eaebc6 30#include "nouveau_ramht.h"
a11c3198 31#include "nouveau_vm.h"
6ee73861 32
6ee73861 33static void
ac94a343 34nv50_fifo_playlist_update(struct drm_device *dev)
6ee73861
BS
35{
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
ac94a343 37 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
a8eaebc6 38 struct nouveau_gpuobj *cur;
694931d2 39 int i, p;
6ee73861
BS
40
41 NV_DEBUG(dev, "\n");
42
ac94a343
BS
43 cur = pfifo->playlist[pfifo->cur_playlist];
44 pfifo->cur_playlist = !pfifo->cur_playlist;
6ee73861 45
694931d2
BS
46 for (i = 0, p = 0; i < pfifo->channels; i++) {
47 if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
48 nv_wo32(cur, p++ * 4, i);
6ee73861 49 }
694931d2 50
f56cb86f 51 dev_priv->engine.instmem.flush(dev);
6ee73861 52
a8eaebc6 53 nv_wr32(dev, 0x32f4, cur->vinst >> 12);
694931d2 54 nv_wr32(dev, 0x32ec, p);
6ee73861
BS
55 nv_wr32(dev, 0x2500, 0x101);
56}
57
ac94a343
BS
58static void
59nv50_fifo_channel_enable(struct drm_device *dev, int channel)
6ee73861
BS
60{
61 struct drm_nouveau_private *dev_priv = dev->dev_private;
cff5c133 62 struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
6ee73861
BS
63 uint32_t inst;
64
65 NV_DEBUG(dev, "ch%d\n", channel);
66
ac94a343 67 if (dev_priv->chipset == 0x50)
a8eaebc6 68 inst = chan->ramfc->vinst >> 12;
6ee73861 69 else
a8eaebc6 70 inst = chan->ramfc->vinst >> 8;
6ee73861 71
ac94a343
BS
72 nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
73 NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
6ee73861
BS
74}
75
76static void
ac94a343 77nv50_fifo_channel_disable(struct drm_device *dev, int channel)
6ee73861
BS
78{
79 struct drm_nouveau_private *dev_priv = dev->dev_private;
80 uint32_t inst;
81
ac94a343 82 NV_DEBUG(dev, "ch%d\n", channel);
6ee73861 83
ac94a343 84 if (dev_priv->chipset == 0x50)
6ee73861
BS
85 inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
86 else
87 inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
88 nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
6ee73861
BS
89}
90
91static void
92nv50_fifo_init_reset(struct drm_device *dev)
93{
94 uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
95
96 NV_DEBUG(dev, "\n");
97
98 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
99 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
100}
101
102static void
103nv50_fifo_init_intr(struct drm_device *dev)
104{
105 NV_DEBUG(dev, "\n");
106
5178d40d 107 nouveau_irq_register(dev, 8, nv04_fifo_isr);
6ee73861
BS
108 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
109 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
110}
111
112static void
113nv50_fifo_init_context_table(struct drm_device *dev)
114{
115 struct drm_nouveau_private *dev_priv = dev->dev_private;
116 int i;
117
118 NV_DEBUG(dev, "\n");
119
120 for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
cff5c133 121 if (dev_priv->channels.ptr[i])
ac94a343 122 nv50_fifo_channel_enable(dev, i);
6ee73861 123 else
ac94a343 124 nv50_fifo_channel_disable(dev, i);
6ee73861
BS
125 }
126
ac94a343 127 nv50_fifo_playlist_update(dev);
6ee73861
BS
128}
129
130static void
131nv50_fifo_init_regs__nv(struct drm_device *dev)
132{
133 NV_DEBUG(dev, "\n");
134
135 nv_wr32(dev, 0x250c, 0x6f3cfc34);
136}
137
138static void
139nv50_fifo_init_regs(struct drm_device *dev)
140{
141 NV_DEBUG(dev, "\n");
142
143 nv_wr32(dev, 0x2500, 0);
144 nv_wr32(dev, 0x3250, 0);
145 nv_wr32(dev, 0x3220, 0);
146 nv_wr32(dev, 0x3204, 0);
147 nv_wr32(dev, 0x3210, 0);
148 nv_wr32(dev, 0x3270, 0);
ec23802d 149 nv_wr32(dev, 0x2044, 0x01003fff);
6ee73861
BS
150
151 /* Enable dummy channels setup by nv50_instmem.c */
ac94a343
BS
152 nv50_fifo_channel_enable(dev, 0);
153 nv50_fifo_channel_enable(dev, 127);
6ee73861
BS
154}
155
156int
157nv50_fifo_init(struct drm_device *dev)
158{
159 struct drm_nouveau_private *dev_priv = dev->dev_private;
ac94a343 160 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
6ee73861
BS
161 int ret;
162
163 NV_DEBUG(dev, "\n");
164
ac94a343
BS
165 if (pfifo->playlist[0]) {
166 pfifo->cur_playlist = !pfifo->cur_playlist;
6ee73861
BS
167 goto just_reset;
168 }
169
a8eaebc6
BS
170 ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
171 NVOBJ_FLAG_ZERO_ALLOC,
172 &pfifo->playlist[0]);
6ee73861 173 if (ret) {
ac94a343 174 NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
6ee73861
BS
175 return ret;
176 }
177
a8eaebc6
BS
178 ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
179 NVOBJ_FLAG_ZERO_ALLOC,
180 &pfifo->playlist[1]);
6ee73861 181 if (ret) {
a8eaebc6 182 nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
ac94a343 183 NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
6ee73861
BS
184 return ret;
185 }
186
187just_reset:
188 nv50_fifo_init_reset(dev);
189 nv50_fifo_init_intr(dev);
190 nv50_fifo_init_context_table(dev);
191 nv50_fifo_init_regs__nv(dev);
192 nv50_fifo_init_regs(dev);
67b342ef
BS
193 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
194 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
195 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
6ee73861
BS
196
197 return 0;
198}
199
200void
201nv50_fifo_takedown(struct drm_device *dev)
202{
203 struct drm_nouveau_private *dev_priv = dev->dev_private;
ac94a343 204 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
6ee73861
BS
205
206 NV_DEBUG(dev, "\n");
207
ac94a343 208 if (!pfifo->playlist[0])
6ee73861
BS
209 return;
210
5178d40d
BS
211 nv_wr32(dev, 0x2140, 0x00000000);
212 nouveau_irq_unregister(dev, 8);
213
a8eaebc6
BS
214 nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
215 nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
6ee73861
BS
216}
217
6ee73861
BS
218int
219nv50_fifo_create_context(struct nouveau_channel *chan)
220{
221 struct drm_device *dev = chan->dev;
222 struct drm_nouveau_private *dev_priv = dev->dev_private;
223 struct nouveau_gpuobj *ramfc = NULL;
4e03b4af 224 uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
ff9e5279 225 unsigned long flags;
6ee73861
BS
226 int ret;
227
228 NV_DEBUG(dev, "ch%d\n", chan->id);
229
ac94a343 230 if (dev_priv->chipset == 0x50) {
a8eaebc6
BS
231 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
232 chan->ramin->vinst, 0x100,
de3a6c0a 233 NVOBJ_FLAG_ZERO_ALLOC |
a8eaebc6 234 NVOBJ_FLAG_ZERO_FREE,
6ee73861
BS
235 &chan->ramfc);
236 if (ret)
237 return ret;
238
a8eaebc6
BS
239 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
240 chan->ramin->vinst + 0x0400,
241 4096, 0, &chan->cache);
6ee73861
BS
242 if (ret)
243 return ret;
244 } else {
a8eaebc6
BS
245 ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
246 NVOBJ_FLAG_ZERO_ALLOC |
247 NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
6ee73861
BS
248 if (ret)
249 return ret;
6ee73861 250
a8eaebc6
BS
251 ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
252 0, &chan->cache);
6ee73861
BS
253 if (ret)
254 return ret;
255 }
a8eaebc6 256 ramfc = chan->ramfc;
6ee73861 257
d908175c
BS
258 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
259 NV50_USER(chan->id), PAGE_SIZE);
260 if (!chan->user)
261 return -ENOMEM;
262
ff9e5279
MM
263 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
264
a8eaebc6 265 nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
e05c5a31 266 nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
b3beb167 267 (4 << 24) /* SEARCH_FULL */ |
a8eaebc6 268 (chan->ramht->gpuobj->cinst >> 4));
ec23802d 269 nv_wo32(ramfc, 0x44, 0x01003fff);
b3beb167
BS
270 nv_wo32(ramfc, 0x60, 0x7fffffff);
271 nv_wo32(ramfc, 0x40, 0x00000000);
272 nv_wo32(ramfc, 0x7c, 0x30000001);
273 nv_wo32(ramfc, 0x78, 0x00000000);
274 nv_wo32(ramfc, 0x3c, 0x403f6078);
4e03b4af
FJ
275 nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
276 nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
277 drm_order(chan->dma.ib_max + 1) << 16);
6ee73861 278
ac94a343 279 if (dev_priv->chipset != 0x50) {
a8eaebc6
BS
280 nv_wo32(chan->ramin, 0, chan->id);
281 nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);
6ee73861 282
a8eaebc6
BS
283 nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
284 nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
6ee73861
BS
285 }
286
f56cb86f 287 dev_priv->engine.instmem.flush(dev);
6ee73861 288
ac94a343
BS
289 nv50_fifo_channel_enable(dev, chan->id);
290 nv50_fifo_playlist_update(dev);
ff9e5279 291 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
6ee73861
BS
292 return 0;
293}
294
03bd6efa
BS
295static bool
296nv50_fifo_wait_kickoff(void *data)
297{
298 struct drm_nouveau_private *dev_priv = data;
299 struct drm_device *dev = dev_priv->dev;
300
301 if (dev_priv->chipset == 0x50) {
302 u32 me_enable = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
303 nv_wr32(dev, 0x00b860, me_enable);
304 }
305
306 return nv_rd32(dev, 0x0032fc) != 0xffffffff;
307}
308
6ee73861
BS
309void
310nv50_fifo_destroy_context(struct nouveau_channel *chan)
311{
312 struct drm_device *dev = chan->dev;
3945e475 313 struct drm_nouveau_private *dev_priv = dev->dev_private;
3945e475 314 unsigned long flags;
6ee73861 315
03bd6efa 316 /* remove channel from playlist, will context switch if active */
3945e475 317 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
03bd6efa 318 nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
ac94a343 319 nv50_fifo_playlist_update(dev);
a87ff62a 320
03bd6efa
BS
321 /* tell any engines on this channel to unload their contexts */
322 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
323 if (!nv_wait_cb(dev, nv50_fifo_wait_kickoff, dev_priv))
324 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
325
326 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
3945e475
FJ
327 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
328
03bd6efa 329 /* clean up */
d908175c
BS
330 if (chan->user) {
331 iounmap(chan->user);
332 chan->user = NULL;
333 }
03bd6efa 334
694931d2 335 nouveau_gpuobj_ref(NULL, &chan->ramfc);
a8eaebc6 336 nouveau_gpuobj_ref(NULL, &chan->cache);
6ee73861
BS
337}
338
339int
340nv50_fifo_load_context(struct nouveau_channel *chan)
341{
6ee73861
BS
342 return 0;
343}
344
345int
346nv50_fifo_unload_context(struct drm_device *dev)
347{
348 struct drm_nouveau_private *dev_priv = dev->dev_private;
03bd6efa 349 int i;
6ee73861 350
03bd6efa
BS
351 /* set playlist length to zero, fifo will unload context */
352 nv_wr32(dev, 0x0032ec, 0);
353
354 /* tell all connected engines to unload their contexts */
355 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
356 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
357 if (chan)
358 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
359 if (!nv_wait_cb(dev, nv50_fifo_wait_kickoff, dev_priv)) {
360 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
361 return -EBUSY;
362 }
6ee73861
BS
363 }
364
6ee73861
BS
365 return 0;
366}
367
56ac7475
BS
368void
369nv50_fifo_tlb_flush(struct drm_device *dev)
370{
a11c3198 371 nv50_vm_flush_engine(dev, 5);
56ac7475 372}