1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */
3 /*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29 #include <linux/sysrq.h>
30 #include "drmP.h"
31 #include "drm.h"
32 #include "i915_drm.h"
33 #include "i915_drv.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
37 #define MAX_NOPID ((u32)~0)
38
39 /**
40 * Interrupts that are always left unmasked.
41 *
42 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
43 * we leave them always unmasked in IMR and then control enabling them through
44 * PIPESTAT alone.
45 */
46 #define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
47 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
48 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
49 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
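/*
 * The pipe A/B event bits above stay unmasked in IMR; the individual
 * pipe event sources are switched on and off at runtime through
 * i915_enable_pipestat()/i915_disable_pipestat() below.
 */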
50
51 /** Interrupts that we mask and unmask at runtime. */
52 #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
53
54 #define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
55 PIPE_VBLANK_INTERRUPT_STATUS)
56
57 #define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
58 PIPE_VBLANK_INTERRUPT_ENABLE)
59
60 #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
61 DRM_I915_VBLANK_PIPE_B)
62
63 void
64 igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
65 {
66 if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
67 dev_priv->gt_irq_mask_reg &= ~mask;
68 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
69 (void) I915_READ(GTIMR);
70 }
71 }
72
73 static inline void
74 igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
75 {
76 if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
77 dev_priv->gt_irq_mask_reg |= mask;
78 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
79 (void) I915_READ(GTIMR);
80 }
81 }
82
83 /* For display hotplug interrupt */
84 void
85 igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
86 {
87 if ((dev_priv->irq_mask_reg & mask) != 0) {
88 dev_priv->irq_mask_reg &= ~mask;
89 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
90 (void) I915_READ(DEIMR);
91 }
92 }
93
94 static inline void
95 igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
96 {
97 if ((dev_priv->irq_mask_reg & mask) != mask) {
98 dev_priv->irq_mask_reg |= mask;
99 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
100 (void) I915_READ(DEIMR);
101 }
102 }
103
104 void
105 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
106 {
107 if ((dev_priv->irq_mask_reg & mask) != 0) {
108 dev_priv->irq_mask_reg &= ~mask;
109 I915_WRITE(IMR, dev_priv->irq_mask_reg);
110 (void) I915_READ(IMR);
111 }
112 }
113
114 static inline void
115 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
116 {
117 if ((dev_priv->irq_mask_reg & mask) != mask) {
118 dev_priv->irq_mask_reg |= mask;
119 I915_WRITE(IMR, dev_priv->irq_mask_reg);
120 (void) I915_READ(IMR);
121 }
122 }
123
124 static inline u32
125 i915_pipestat(int pipe)
126 {
127 if (pipe == 0)
128 return PIPEASTAT;
129 if (pipe == 1)
130 return PIPEBSTAT;
131 BUG();
132 }
133
134 void
135 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
136 {
137 if ((dev_priv->pipestat[pipe] & mask) != mask) {
138 u32 reg = i915_pipestat(pipe);
139
140 dev_priv->pipestat[pipe] |= mask;
141 /* Enable the interrupt, clear any pending status */
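/* PIPESTAT keeps enable bits in the high half of the register and the
 * matching status bits in the low half; status bits are cleared by
 * writing 1, so OR-ing in (mask >> 16) acks any event that was already
 * pending for the sources being enabled.
 */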
142 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
143 (void) I915_READ(reg);
144 }
145 }
146
147 void
148 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
149 {
150 if ((dev_priv->pipestat[pipe] & mask) != 0) {
151 u32 reg = i915_pipestat(pipe);
152
153 dev_priv->pipestat[pipe] &= ~mask;
154 I915_WRITE(reg, dev_priv->pipestat[pipe]);
155 (void) I915_READ(reg);
156 }
157 }
158
159 /**
160 * i915_pipe_enabled - check if a pipe is enabled
161 * @dev: DRM device
162 * @pipe: pipe to check
163 *
164 * Reading certain registers when the pipe is disabled can hang the chip.
165 * Use this routine to make sure the PLL is running and the pipe is active
166 * before reading such registers if unsure.
167 */
168 static int
169 i915_pipe_enabled(struct drm_device *dev, int pipe)
170 {
171 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
172 unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
173
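/* The enable bit sits at the same position in PIPEACONF and PIPEBCONF,
 * so the PIPEACONF_ENABLE define works for either pipe here.
 */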
174 if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
175 return 1;
176
177 return 0;
178 }
179
180 /* Called from drm generic code, passed a 'crtc', which
181 * we use as a pipe index
182 */
183 u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
184 {
185 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
186 unsigned long high_frame;
187 unsigned long low_frame;
188 u32 high1, high2, low, count;
189
190 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
191 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
192
193 if (!i915_pipe_enabled(dev, pipe)) {
194 DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
195 return 0;
196 }
197
198 /*
199 * High & low register fields aren't synchronized, so make sure
200 * we get a low value that's stable across two reads of the high
201 * register.
202 */
203 do {
204 high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
205 PIPE_FRAME_HIGH_SHIFT);
206 low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
207 PIPE_FRAME_LOW_SHIFT);
208 high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
209 PIPE_FRAME_HIGH_SHIFT);
210 } while (high1 != high2);
211
212 count = (high1 << 8) | low;
213
214 return count;
215 }
216
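/*
 * G4X/GM45 parts expose a dedicated hardware frame counter register, so
 * no high/low double read is needed there.
 */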
217 u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
218 {
219 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
220 int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
221
222 if (!i915_pipe_enabled(dev, pipe)) {
223 DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
224 return 0;
225 }
226
227 return I915_READ(reg);
228 }
229
230 /*
231 * Handle hotplug events outside the interrupt handler proper.
232 */
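/*
 * Runs from the driver workqueue in process context, so the per-connector
 * hot_plug handlers are free to sleep.
 */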
233 static void i915_hotplug_work_func(struct work_struct *work)
234 {
235 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
236 hotplug_work);
237 struct drm_device *dev = dev_priv->dev;
238 struct drm_mode_config *mode_config = &dev->mode_config;
239 struct drm_connector *connector;
240
241 if (mode_config->num_connector) {
242 list_for_each_entry(connector, &mode_config->connector_list, head) {
243 struct intel_output *intel_output = to_intel_output(connector);
244
245 if (intel_output->hot_plug)
246 (*intel_output->hot_plug) (intel_output);
247 }
248 }
249 /* Just fire off a uevent and let userspace tell us what to do */
250 drm_sysfs_hotplug_event(dev);
251 }
252
253 irqreturn_t igdng_irq_handler(struct drm_device *dev)
254 {
255 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
256 int ret = IRQ_NONE;
257 u32 de_iir, gt_iir, de_ier;
258 u32 new_de_iir, new_gt_iir;
259 struct drm_i915_master_private *master_priv;
260
261 /* disable master interrupt before clearing iir */
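/* With DE_MASTER_IRQ_CONTROL cleared the hardware won't assert a new
 * interrupt while the IIR bits below are being acked; DEIER is restored
 * on the way out.
 */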
262 de_ier = I915_READ(DEIER);
263 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
264 (void)I915_READ(DEIER);
265
266 de_iir = I915_READ(DEIIR);
267 gt_iir = I915_READ(GTIIR);
268
269 for (;;) {
270 if (de_iir == 0 && gt_iir == 0)
271 break;
272
273 ret = IRQ_HANDLED;
274
275 I915_WRITE(DEIIR, de_iir);
276 new_de_iir = I915_READ(DEIIR);
277 I915_WRITE(GTIIR, gt_iir);
278 new_gt_iir = I915_READ(GTIIR);
279
280 if (dev->primary->master) {
281 master_priv = dev->primary->master->driver_priv;
282 if (master_priv->sarea_priv)
283 master_priv->sarea_priv->last_dispatch =
284 READ_BREADCRUMB(dev_priv);
285 }
286
287 if (gt_iir & GT_USER_INTERRUPT) {
288 u32 seqno = i915_get_gem_seqno(dev);
289 dev_priv->mm.irq_gem_seqno = seqno;
290 trace_i915_gem_request_complete(dev, seqno);
291 DRM_WAKEUP(&dev_priv->irq_queue);
292 }
293
294 de_iir = new_de_iir;
295 gt_iir = new_gt_iir;
296 }
297
298 I915_WRITE(DEIER, de_ier);
299 (void)I915_READ(DEIER);
300
301 return ret;
302 }
303
304 /**
305 * i915_error_work_func - do process context error handling work
306 * @work: work struct
307 *
308 * Fire an error uevent so userspace can see that a hang or error
309 * was detected.
310 */
311 static void i915_error_work_func(struct work_struct *work)
312 {
313 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
314 error_work);
315 struct drm_device *dev = dev_priv->dev;
316 char *error_event[] = { "ERROR=1", NULL };
317 char *reset_event[] = { "RESET=1", NULL };
318 char *reset_done_event[] = { "ERROR=0", NULL };
319
320 DRM_DEBUG("generating error event\n");
321 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
322
323 if (atomic_read(&dev_priv->mm.wedged)) {
324 if (IS_I965G(dev)) {
325 DRM_DEBUG("resetting chip\n");
326 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
327 if (!i965_reset(dev, GDRST_RENDER)) {
328 atomic_set(&dev_priv->mm.wedged, 0);
329 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
330 }
331 } else {
332 printk("reboot required\n");
333 }
334 }
335 }
336
337 /**
338 * i915_capture_error_state - capture an error record for later analysis
339 * @dev: drm device
340 *
341 * Should be called when an error is detected (either a hang or an error
342 * interrupt) to capture error state from the time of the error. Fills
343 * out a structure which becomes available in debugfs for user level tools
344 * to pick up.
345 */
346 static void i915_capture_error_state(struct drm_device *dev)
347 {
348 struct drm_i915_private *dev_priv = dev->dev_private;
349 struct drm_i915_error_state *error;
350 unsigned long flags;
351
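/* Only the first error is captured; later errors are ignored while
 * first_error is still set.
 */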
352 spin_lock_irqsave(&dev_priv->error_lock, flags);
353 if (dev_priv->first_error)
354 goto out;
355
356 error = kmalloc(sizeof(*error), GFP_ATOMIC);
357 if (!error) {
358 DRM_DEBUG("out ot memory, not capturing error state\n");
359 goto out;
360 }
361
362 error->eir = I915_READ(EIR);
363 error->pgtbl_er = I915_READ(PGTBL_ER);
364 error->pipeastat = I915_READ(PIPEASTAT);
365 error->pipebstat = I915_READ(PIPEBSTAT);
366 error->instpm = I915_READ(INSTPM);
367 if (!IS_I965G(dev)) {
368 error->ipeir = I915_READ(IPEIR);
369 error->ipehr = I915_READ(IPEHR);
370 error->instdone = I915_READ(INSTDONE);
371 error->acthd = I915_READ(ACTHD);
372 } else {
373 error->ipeir = I915_READ(IPEIR_I965);
374 error->ipehr = I915_READ(IPEHR_I965);
375 error->instdone = I915_READ(INSTDONE_I965);
376 error->instps = I915_READ(INSTPS);
377 error->instdone1 = I915_READ(INSTDONE1);
378 error->acthd = I915_READ(ACTHD_I965);
379 }
380
381 do_gettimeofday(&error->time);
382
383 dev_priv->first_error = error;
384
385 out:
386 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
387 }
388
389 /**
390 * i915_handle_error - handle an error interrupt
391 * @dev: drm device
392 *
393 * Do some basic checking of register state at error interrupt time and
394 * dump it to the syslog. Also call i915_capture_error_state() to make
395 * sure we get a record and make it available in debugfs. Fire a uevent
396 * so userspace knows something bad happened (should trigger collection
397 * of a ring dump etc.).
398 */
399 static void i915_handle_error(struct drm_device *dev, bool wedged)
400 {
401 struct drm_i915_private *dev_priv = dev->dev_private;
402 u32 eir = I915_READ(EIR);
403 u32 pipea_stats = I915_READ(PIPEASTAT);
404 u32 pipeb_stats = I915_READ(PIPEBSTAT);
405
406 i915_capture_error_state(dev);
407
408 printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
409 eir);
410
411 if (IS_G4X(dev)) {
412 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
413 u32 ipeir = I915_READ(IPEIR_I965);
414
415 printk(KERN_ERR " IPEIR: 0x%08x\n",
416 I915_READ(IPEIR_I965));
417 printk(KERN_ERR " IPEHR: 0x%08x\n",
418 I915_READ(IPEHR_I965));
419 printk(KERN_ERR " INSTDONE: 0x%08x\n",
420 I915_READ(INSTDONE_I965));
421 printk(KERN_ERR " INSTPS: 0x%08x\n",
422 I915_READ(INSTPS));
423 printk(KERN_ERR " INSTDONE1: 0x%08x\n",
424 I915_READ(INSTDONE1));
425 printk(KERN_ERR " ACTHD: 0x%08x\n",
426 I915_READ(ACTHD_I965));
427 I915_WRITE(IPEIR_I965, ipeir);
428 (void)I915_READ(IPEIR_I965);
429 }
430 if (eir & GM45_ERROR_PAGE_TABLE) {
431 u32 pgtbl_err = I915_READ(PGTBL_ER);
432 printk(KERN_ERR "page table error\n");
433 printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
434 pgtbl_err);
435 I915_WRITE(PGTBL_ER, pgtbl_err);
436 (void)I915_READ(PGTBL_ER);
437 }
438 }
439
440 if (IS_I9XX(dev)) {
441 if (eir & I915_ERROR_PAGE_TABLE) {
442 u32 pgtbl_err = I915_READ(PGTBL_ER);
443 printk(KERN_ERR "page table error\n");
444 printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
445 pgtbl_err);
446 I915_WRITE(PGTBL_ER, pgtbl_err);
447 (void)I915_READ(PGTBL_ER);
448 }
449 }
450
451 if (eir & I915_ERROR_MEMORY_REFRESH) {
452 printk(KERN_ERR "memory refresh error\n");
453 printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
454 pipea_stats);
455 printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
456 pipeb_stats);
457 /* pipestat has already been acked */
458 }
459 if (eir & I915_ERROR_INSTRUCTION) {
460 printk(KERN_ERR "instruction error\n");
461 printk(KERN_ERR " INSTPM: 0x%08x\n",
462 I915_READ(INSTPM));
463 if (!IS_I965G(dev)) {
464 u32 ipeir = I915_READ(IPEIR);
465
466 printk(KERN_ERR " IPEIR: 0x%08x\n",
467 I915_READ(IPEIR));
468 printk(KERN_ERR " IPEHR: 0x%08x\n",
469 I915_READ(IPEHR));
470 printk(KERN_ERR " INSTDONE: 0x%08x\n",
471 I915_READ(INSTDONE));
472 printk(KERN_ERR " ACTHD: 0x%08x\n",
473 I915_READ(ACTHD));
474 I915_WRITE(IPEIR, ipeir);
475 (void)I915_READ(IPEIR);
476 } else {
477 u32 ipeir = I915_READ(IPEIR_I965);
478
479 printk(KERN_ERR " IPEIR: 0x%08x\n",
480 I915_READ(IPEIR_I965));
481 printk(KERN_ERR " IPEHR: 0x%08x\n",
482 I915_READ(IPEHR_I965));
483 printk(KERN_ERR " INSTDONE: 0x%08x\n",
484 I915_READ(INSTDONE_I965));
485 printk(KERN_ERR " INSTPS: 0x%08x\n",
486 I915_READ(INSTPS));
487 printk(KERN_ERR " INSTDONE1: 0x%08x\n",
488 I915_READ(INSTDONE1));
489 printk(KERN_ERR " ACTHD: 0x%08x\n",
490 I915_READ(ACTHD_I965));
491 I915_WRITE(IPEIR_I965, ipeir);
492 (void)I915_READ(IPEIR_I965);
493 }
494 }
495
496 I915_WRITE(EIR, eir);
497 (void)I915_READ(EIR);
498 eir = I915_READ(EIR);
499 if (eir) {
500 /*
501 * some errors might have become stuck,
502 * mask them.
503 */
504 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
505 I915_WRITE(EMR, I915_READ(EMR) | eir);
506 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
507 }
508
509 if (wedged) {
510 atomic_set(&dev_priv->mm.wedged, 1);
511
512 /*
513 * Wake up waiting processes so they don't hang
514 */
515 printk("i915: Waking up sleeping processes\n");
516 DRM_WAKEUP(&dev_priv->irq_queue);
517 }
518
519 queue_work(dev_priv->wq, &dev_priv->error_work);
520 }
521
522 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
523 {
524 struct drm_device *dev = (struct drm_device *) arg;
525 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
526 struct drm_i915_master_private *master_priv;
527 u32 iir, new_iir;
528 u32 pipea_stats, pipeb_stats;
529 u32 vblank_status;
530 u32 vblank_enable;
531 int vblank = 0;
532 unsigned long irqflags;
533 int irq_received;
534 int ret = IRQ_NONE;
535
536 atomic_inc(&dev_priv->irq_received);
537
538 if (IS_IGDNG(dev))
539 return igdng_irq_handler(dev);
540
541 iir = I915_READ(IIR);
542
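/* 965+ parts report vblank through the "start of vblank" pipestat event;
 * older parts use the legacy vblank event.
 */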
543 if (IS_I965G(dev)) {
544 vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
545 vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
546 } else {
547 vblank_status = I915_VBLANK_INTERRUPT_STATUS;
548 vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
549 }
550
551 for (;;) {
552 irq_received = iir != 0;
553
554 /* Can't rely on pipestat interrupt bit in iir as it might
555 * have been cleared after the pipestat interrupt was received.
556 * It doesn't set the bit in iir again, but it still produces
557 * interrupts (for non-MSI).
558 */
559 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
560 pipea_stats = I915_READ(PIPEASTAT);
561 pipeb_stats = I915_READ(PIPEBSTAT);
562
563 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
564 i915_handle_error(dev, false);
565
566 /*
567 * Clear the PIPE(A|B)STAT regs before the IIR
568 */
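/* 0x8000ffff covers the status bits (15:0 plus the FIFO underrun bit at
 * 31) without disturbing the enable bits in between; writing the status
 * bits back acks them.
 */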
569 if (pipea_stats & 0x8000ffff) {
570 if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
571 DRM_DEBUG("pipe a underrun\n");
572 I915_WRITE(PIPEASTAT, pipea_stats);
573 irq_received = 1;
574 }
575
576 if (pipeb_stats & 0x8000ffff) {
577 if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
578 DRM_DEBUG("pipe b underrun\n");
579 I915_WRITE(PIPEBSTAT, pipeb_stats);
580 irq_received = 1;
581 }
582 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
583
584 if (!irq_received)
585 break;
586
587 ret = IRQ_HANDLED;
588
589 /* Consume port. Then clear IIR or we'll miss events */
590 if ((I915_HAS_HOTPLUG(dev)) &&
591 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
592 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
593
594 DRM_DEBUG("hotplug event received, stat 0x%08x\n",
595 hotplug_status);
596 if (hotplug_status & dev_priv->hotplug_supported_mask)
597 queue_work(dev_priv->wq,
598 &dev_priv->hotplug_work);
599
600 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
601 I915_READ(PORT_HOTPLUG_STAT);
602
603 /* An EOS interrupt occurred */
604 if (IS_IGD(dev) &&
605 (hotplug_status & CRT_EOS_INT_STATUS)) {
606 u32 temp;
607
608 DRM_DEBUG("EOS interrupt occurs\n");
609 /* status is already cleared */
610 temp = I915_READ(ADPA);
611 temp &= ~ADPA_DAC_ENABLE;
612 I915_WRITE(ADPA, temp);
613
614 temp = I915_READ(PORT_HOTPLUG_EN);
615 temp &= ~CRT_EOS_INT_EN;
616 I915_WRITE(PORT_HOTPLUG_EN, temp);
617
618 temp = I915_READ(PORT_HOTPLUG_STAT);
619 if (temp & CRT_EOS_INT_STATUS)
620 I915_WRITE(PORT_HOTPLUG_STAT,
621 CRT_EOS_INT_STATUS);
622 }
623 }
624
625 I915_WRITE(IIR, iir);
626 new_iir = I915_READ(IIR); /* Flush posted writes */
627
628 if (dev->primary->master) {
629 master_priv = dev->primary->master->driver_priv;
630 if (master_priv->sarea_priv)
631 master_priv->sarea_priv->last_dispatch =
632 READ_BREADCRUMB(dev_priv);
633 }
634
635 if (iir & I915_USER_INTERRUPT) {
636 u32 seqno = i915_get_gem_seqno(dev);
637 dev_priv->mm.irq_gem_seqno = seqno;
638 trace_i915_gem_request_complete(dev, seqno);
639 DRM_WAKEUP(&dev_priv->irq_queue);
640 dev_priv->hangcheck_count = 0;
641 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
642 }
643
644 if (pipea_stats & vblank_status) {
645 vblank++;
646 drm_handle_vblank(dev, 0);
647 }
648
649 if (pipeb_stats & vblank_status) {
650 vblank++;
651 drm_handle_vblank(dev, 1);
652 }
653
654 if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
655 (iir & I915_ASLE_INTERRUPT))
656 opregion_asle_intr(dev);
657
658 /* With MSI, interrupts are only generated when iir
659 * transitions from zero to nonzero. If another bit got
660 * set while we were handling the existing iir bits, then
661 * we would never get another interrupt.
662 *
663 * This is fine on non-MSI as well, as if we hit this path
664 * we avoid exiting the interrupt handler only to generate
665 * another one.
666 *
667 * Note that for MSI this could cause a stray interrupt report
668 * if an interrupt landed in the time between writing IIR and
669 * the posting read. This should be rare enough to never
670 * trigger the 99% of 100,000 interrupts test for disabling
671 * stray interrupts.
672 */
673 iir = new_iir;
674 }
675
676 return ret;
677 }
678
679 static int i915_emit_irq(struct drm_device * dev)
680 {
681 drm_i915_private_t *dev_priv = dev->dev_private;
682 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
683 RING_LOCALS;
684
685 i915_kernel_lost_context(dev);
686
687 DRM_DEBUG("\n");
688
689 dev_priv->counter++;
690 if (dev_priv->counter > 0x7FFFFFFFUL)
691 dev_priv->counter = 1;
692 if (master_priv->sarea_priv)
693 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
694
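/*
 * MI_STORE_DWORD_INDEX writes the new counter into the breadcrumb slot
 * of the hardware status page, and MI_USER_INTERRUPT then raises the
 * user interrupt so waiters comparing READ_BREADCRUMB() against the
 * value returned here get woken.
 */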
695 BEGIN_LP_RING(4);
696 OUT_RING(MI_STORE_DWORD_INDEX);
697 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
698 OUT_RING(dev_priv->counter);
699 OUT_RING(MI_USER_INTERRUPT);
700 ADVANCE_LP_RING();
701
702 return dev_priv->counter;
703 }
704
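/*
 * user_irq_refcount lets several waiters share the single user-interrupt
 * enable; the hardware is only touched on the 0->1 and 1->0 transitions.
 * See i915_wait_irq() below for the usual get/wait/put pattern.
 */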
705 void i915_user_irq_get(struct drm_device *dev)
706 {
707 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
708 unsigned long irqflags;
709
710 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
711 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
712 if (IS_IGDNG(dev))
713 igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
714 else
715 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
716 }
717 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
718 }
719
720 void i915_user_irq_put(struct drm_device *dev)
721 {
722 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
723 unsigned long irqflags;
724
725 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
726 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
727 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
728 if (IS_IGDNG(dev))
729 igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
730 else
731 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
732 }
733 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
734 }
735
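/*
 * Holds the user interrupt on behalf of the tracer; the matching
 * i915_user_irq_put() is presumably issued from the request retirement
 * path once trace_irq_seqno has passed (not shown in this file).
 */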
736 void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
737 {
738 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
739
740 if (dev_priv->trace_irq_seqno == 0)
741 i915_user_irq_get(dev);
742
743 dev_priv->trace_irq_seqno = seqno;
744 }
745
746 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
747 {
748 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
749 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
750 int ret = 0;
751
752 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
753 READ_BREADCRUMB(dev_priv));
754
755 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
756 if (master_priv->sarea_priv)
757 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
758 return 0;
759 }
760
761 if (master_priv->sarea_priv)
762 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
763
764 i915_user_irq_get(dev);
765 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
766 READ_BREADCRUMB(dev_priv) >= irq_nr);
767 i915_user_irq_put(dev);
768
769 if (ret == -EBUSY) {
770 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
771 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
772 }
773
774 return ret;
775 }
776
777 /* Needs the lock as it touches the ring.
778 */
779 int i915_irq_emit(struct drm_device *dev, void *data,
780 struct drm_file *file_priv)
781 {
782 drm_i915_private_t *dev_priv = dev->dev_private;
783 drm_i915_irq_emit_t *emit = data;
784 int result;
785
786 if (!dev_priv || !dev_priv->ring.virtual_start) {
787 DRM_ERROR("called with no initialization\n");
788 return -EINVAL;
789 }
790
791 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
792
793 mutex_lock(&dev->struct_mutex);
794 result = i915_emit_irq(dev);
795 mutex_unlock(&dev->struct_mutex);
796
797 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
798 DRM_ERROR("copy_to_user\n");
799 return -EFAULT;
800 }
801
802 return 0;
803 }
804
805 /* Doesn't need the hardware lock.
806 */
807 int i915_irq_wait(struct drm_device *dev, void *data,
808 struct drm_file *file_priv)
809 {
810 drm_i915_private_t *dev_priv = dev->dev_private;
811 drm_i915_irq_wait_t *irqwait = data;
812
813 if (!dev_priv) {
814 DRM_ERROR("called with no initialization\n");
815 return -EINVAL;
816 }
817
818 return i915_wait_irq(dev, irqwait->irq_seq);
819 }
820
821 /* Called from drm generic code, passed 'crtc' which
822 * we use as a pipe index
823 */
824 int i915_enable_vblank(struct drm_device *dev, int pipe)
825 {
826 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
827 unsigned long irqflags;
828 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
829 u32 pipeconf;
830
831 pipeconf = I915_READ(pipeconf_reg);
832 if (!(pipeconf & PIPEACONF_ENABLE))
833 return -EINVAL;
834
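/* Vblank interrupt enabling isn't wired up for IGDNG in this code, so
 * just report success here.
 */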
835 if (IS_IGDNG(dev))
836 return 0;
837
838 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
839 if (IS_I965G(dev))
840 i915_enable_pipestat(dev_priv, pipe,
841 PIPE_START_VBLANK_INTERRUPT_ENABLE);
842 else
843 i915_enable_pipestat(dev_priv, pipe,
844 PIPE_VBLANK_INTERRUPT_ENABLE);
845 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
846 return 0;
847 }
848
849 /* Called from drm generic code, passed 'crtc' which
850 * we use as a pipe index
851 */
852 void i915_disable_vblank(struct drm_device *dev, int pipe)
853 {
854 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
855 unsigned long irqflags;
856
857 if (IS_IGDNG(dev))
858 return;
859
860 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
861 i915_disable_pipestat(dev_priv, pipe,
862 PIPE_VBLANK_INTERRUPT_ENABLE |
863 PIPE_START_VBLANK_INTERRUPT_ENABLE);
864 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
865 }
866
867 void i915_enable_interrupt (struct drm_device *dev)
868 {
869 struct drm_i915_private *dev_priv = dev->dev_private;
870
871 if (!IS_IGDNG(dev))
872 opregion_enable_asle(dev);
873 dev_priv->irq_enabled = 1;
874 }
875
876
877 /* Set the vblank monitor pipe
878 */
879 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
880 struct drm_file *file_priv)
881 {
882 drm_i915_private_t *dev_priv = dev->dev_private;
883
884 if (!dev_priv) {
885 DRM_ERROR("called with no initialization\n");
886 return -EINVAL;
887 }
888
889 return 0;
890 }
891
892 int i915_vblank_pipe_get(struct drm_device *dev, void *data,
893 struct drm_file *file_priv)
894 {
895 drm_i915_private_t *dev_priv = dev->dev_private;
896 drm_i915_vblank_pipe_t *pipe = data;
897
898 if (!dev_priv) {
899 DRM_ERROR("called with no initialization\n");
900 return -EINVAL;
901 }
902
903 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
904
905 return 0;
906 }
907
908 /**
909 * Schedule buffer swap at given vertical blank.
910 */
911 int i915_vblank_swap(struct drm_device *dev, void *data,
912 struct drm_file *file_priv)
913 {
914 /* The delayed swap mechanism was fundamentally racy, and has been
915 * removed. The model was that the client requested a delayed flip/swap
916 * from the kernel, then waited for vblank before continuing to perform
917 * rendering. The problem was that the kernel might wake the client
918 * up before it dispatched the vblank swap (since the lock has to be
919 * held while touching the ringbuffer), in which case the client would
920 * clear and start the next frame before the swap occurred, and
921 * flicker would occur in addition to likely missing the vblank.
922 *
923 * In the absence of this ioctl, userland falls back to a correct path
924 * of waiting for a vblank, then dispatching the swap on its own.
925 * Context switching to userland and back is plenty fast enough for
926 * meeting the requirements of vblank swapping.
927 */
928 return -EINVAL;
929 }
930
931 struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
932 drm_i915_private_t *dev_priv = dev->dev_private;
933 return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
934 }
935
936 /**
937 * This is called when the chip hasn't reported back with completed
938 * batchbuffers in a long time. The first time this is called we simply record
939 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
940 * again, we assume the chip is wedged and try to fix it.
941 */
942 void i915_hangcheck_elapsed(unsigned long data)
943 {
944 struct drm_device *dev = (struct drm_device *)data;
945 drm_i915_private_t *dev_priv = dev->dev_private;
946 uint32_t acthd;
947
948 if (!IS_I965G(dev))
949 acthd = I915_READ(ACTHD);
950 else
951 acthd = I915_READ(ACTHD_I965);
952
953 /* If all work is done then ACTHD clearly hasn't advanced. */
954 if (list_empty(&dev_priv->mm.request_list) ||
955 i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
956 dev_priv->hangcheck_count = 0;
957 return;
958 }
959
960 if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
961 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
962 i915_handle_error(dev, true);
963 return;
964 }
965
966 /* Reset the timer in case the chip hangs without another request being added */
967 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
968
969 if (acthd != dev_priv->last_acthd)
970 dev_priv->hangcheck_count = 0;
971 else
972 dev_priv->hangcheck_count++;
973
974 dev_priv->last_acthd = acthd;
975 }
976
977 /* drm_dma.h hooks
978 */
979 static void igdng_irq_preinstall(struct drm_device *dev)
980 {
981 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
982
983 I915_WRITE(HWSTAM, 0xeffe);
984
985 /* XXX hotplug from PCH */
986
987 I915_WRITE(DEIMR, 0xffffffff);
988 I915_WRITE(DEIER, 0x0);
989 (void) I915_READ(DEIER);
990
991 /* and GT */
992 I915_WRITE(GTIMR, 0xffffffff);
993 I915_WRITE(GTIER, 0x0);
994 (void) I915_READ(GTIER);
995 }
996
997 static int igdng_irq_postinstall(struct drm_device *dev)
998 {
999 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1000 /* enable the kinds of interrupts that are always enabled */
1001 u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
1002 u32 render_mask = GT_USER_INTERRUPT;
1003
1004 dev_priv->irq_mask_reg = ~display_mask;
1005 dev_priv->de_irq_enable_reg = display_mask;
1006
1007 /* these should always be able to generate an irq */
1008 I915_WRITE(DEIIR, I915_READ(DEIIR));
1009 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
1010 I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
1011 (void) I915_READ(DEIER);
1012
1013 /* user interrupt should be enabled, but initially masked */
1014 dev_priv->gt_irq_mask_reg = 0xffffffff;
1015 dev_priv->gt_irq_enable_reg = render_mask;
1016
1017 I915_WRITE(GTIIR, I915_READ(GTIIR));
1018 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
1019 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
1020 (void) I915_READ(GTIER);
1021
1022 return 0;
1023 }
1024
1025 void i915_driver_irq_preinstall(struct drm_device * dev)
1026 {
1027 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1028
1029 atomic_set(&dev_priv->irq_received, 0);
1030
1031 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1032 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1033
1034 if (IS_IGDNG(dev)) {
1035 igdng_irq_preinstall(dev);
1036 return;
1037 }
1038
1039 if (I915_HAS_HOTPLUG(dev)) {
1040 I915_WRITE(PORT_HOTPLUG_EN, 0);
1041 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1042 }
1043
1044 I915_WRITE(HWSTAM, 0xeffe);
1045 I915_WRITE(PIPEASTAT, 0);
1046 I915_WRITE(PIPEBSTAT, 0);
1047 I915_WRITE(IMR, 0xffffffff);
1048 I915_WRITE(IER, 0x0);
1049 (void) I915_READ(IER);
1050 }
1051
1052 int i915_driver_irq_postinstall(struct drm_device *dev)
1053 {
1054 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1055 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
1056 u32 error_mask;
1057
1058 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
1059
1060 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1061
1062 if (IS_IGDNG(dev))
1063 return igdng_irq_postinstall(dev);
1064
1065 /* Unmask the interrupts that we always want on. */
1066 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
1067
1068 dev_priv->pipestat[0] = 0;
1069 dev_priv->pipestat[1] = 0;
1070
1071 if (I915_HAS_HOTPLUG(dev)) {
1072 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1073
1074 /* Leave other bits alone */
1075 hotplug_en |= HOTPLUG_EN_MASK;
1076 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1077
1078 dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
1079 TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
1080 SDVOB_HOTPLUG_INT_STATUS;
1081 if (IS_G4X(dev)) {
1082 dev_priv->hotplug_supported_mask |=
1083 HDMIB_HOTPLUG_INT_STATUS |
1084 HDMIC_HOTPLUG_INT_STATUS |
1085 HDMID_HOTPLUG_INT_STATUS;
1086 }
1087 /* Enable in IER... */
1088 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1089 /* and unmask in IMR */
1090 i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT);
1091 }
1092
1093 /*
1094 * Enable some error detection; note that the instruction error mask
1095 * bit is reserved, so we leave it masked.
1096 */
1097 if (IS_G4X(dev)) {
1098 error_mask = ~(GM45_ERROR_PAGE_TABLE |
1099 GM45_ERROR_MEM_PRIV |
1100 GM45_ERROR_CP_PRIV |
1101 I915_ERROR_MEMORY_REFRESH);
1102 } else {
1103 error_mask = ~(I915_ERROR_PAGE_TABLE |
1104 I915_ERROR_MEMORY_REFRESH);
1105 }
1106 I915_WRITE(EMR, error_mask);
1107
1108 /* Disable pipe interrupt enables, clear pending pipe status */
1109 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
1110 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
1111 /* Clear pending interrupt status */
1112 I915_WRITE(IIR, I915_READ(IIR));
1113
1114 I915_WRITE(IER, enable_mask);
1115 I915_WRITE(IMR, dev_priv->irq_mask_reg);
1116 (void) I915_READ(IER);
1117
1118 opregion_enable_asle(dev);
1119
1120 return 0;
1121 }
1122
1123 static void igdng_irq_uninstall(struct drm_device *dev)
1124 {
1125 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1126 I915_WRITE(HWSTAM, 0xffffffff);
1127
1128 I915_WRITE(DEIMR, 0xffffffff);
1129 I915_WRITE(DEIER, 0x0);
1130 I915_WRITE(DEIIR, I915_READ(DEIIR));
1131
1132 I915_WRITE(GTIMR, 0xffffffff);
1133 I915_WRITE(GTIER, 0x0);
1134 I915_WRITE(GTIIR, I915_READ(GTIIR));
1135 }
1136
1137 void i915_driver_irq_uninstall(struct drm_device * dev)
1138 {
1139 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1140
1141 if (!dev_priv)
1142 return;
1143
1144 dev_priv->vblank_pipe = 0;
1145
1146 if (IS_IGDNG(dev)) {
1147 igdng_irq_uninstall(dev);
1148 return;
1149 }
1150
1151 if (I915_HAS_HOTPLUG(dev)) {
1152 I915_WRITE(PORT_HOTPLUG_EN, 0);
1153 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1154 }
1155
1156 I915_WRITE(HWSTAM, 0xffffffff);
1157 I915_WRITE(PIPEASTAT, 0);
1158 I915_WRITE(PIPEBSTAT, 0);
1159 I915_WRITE(IMR, 0xffffffff);
1160 I915_WRITE(IER, 0x0);
1161
1162 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
1163 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
1164 I915_WRITE(IIR, I915_READ(IIR));
1165 }