/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"

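/* The DMA magic cookie is written over the first word of the area the
   firmware is about to transfer. After the transfer, dma_post() looks for
   it again to locate the real start of the data, then restores the word
   that was backed up in s->pending_backup. */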
#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);

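/* Map from the stream type index that the encoder firmware reports in
   data[0] of the DMA mailbox to the driver's stream types.
   ivtv_irq_enc_start_cap() only accepts indices 0-2; VBI capture is
   signalled separately through IVTV_IRQ_ENC_VBI_CAP, so the last entry
   appears to be unused by that path. */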
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};


static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
			s->v4l2dev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	list_for_each_entry(buf, &s->q_dma.list, list) {
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		} else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}

void ivtv_irq_work_handler(struct work_struct *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);

	DEFINE_WAIT(wait);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);
}

/* Determine the required DMA size, set up enough buffers in the predma queue
   and actually copy the data from the card to the buffers in case a PIO
   transfer is required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->sg_pending_size;
	int rc;

	/* sanity checks */
	if (s->v4l2dev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->pending_pts = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->pending_pts = ((u64) data[5] << 32) | data[6];
		break;

	case IVTV_ENC_STREAM_TYPE_PCM:
		offset = data[1] + 12;
		size = data[2] - 12;
		s->pending_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;

	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 12) {
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;

	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		s->pending_pts = 0;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		} else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = min(size, s->buf_size);
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}

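/* Post-process the buffers of a completed transfer: locate the magic
   cookie to fix up the data offset, restore the backed-up word, flag MPG
   and VBI buffers for byteswapping outside the irq handler, then hand the
   buffers to the full queue (or to the VBI parser) and wake any readers. */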
static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	__le32 *u32buf;
	int x = 0;

	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (__le32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

		if (x == 0 && ivtv_use_dma(s)) {
			offset = s->dma_last_offset;
			if (le32_to_cpu(u32buf[offset / 4]) != DMA_MAGIC_COOKIE) {
				for (offset = 0; offset < 64; offset++) {
					if (le32_to_cpu(u32buf[offset]) == DMA_MAGIC_COOKIE)
						break;
				}
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
						s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			} else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				/* the regions overlap, so memmove is required */
				memmove(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each_entry(buf, &s->q_dma.list, list) {
			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		if (s->id == -1) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
	if (s->id != -1)
		wake_up(&s->waitq);
}

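/* Build the decoder scatter-gather list from the buffers queued in
   q_predma. For YUV a 720*16 blanking block is inserted first when
   f->offset_y is set, and the buffer that straddles the end of the luma
   plane is split across two SG entries so the chroma data continues at
   uv_offset. */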
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	struct ivtv_buffer *buf;
	u32 y_size = 720 * ((f->src_h + 31) & ~31);
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);

	/* Insert buffer block for YUV if needed */
	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
		if (yi->blanking_dmaptr) {
			s->sg_pending[idx].src = yi->blanking_dmaptr;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = 720 * 16;
		}
		offset += 720 * 16;
		idx++;
	}

	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
				(bytes_written + buf->bytesused) >= y_size) {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
					buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
					buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	} else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}

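/* Program a single scatter-gather element into the hardware and kick off
   the transfer. Bit 31 of the size word appears to mark the last element
   of the xfer. The 300 ms dma_timer catches transfers that never raise a
   completion interrupt; see ivtv_unfinished_dma() at the end of this file. */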
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of an
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
			s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
	}

	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	} else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
	}
}

static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
}

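/* Handle the decoder "DMA read" completion: check DMASTATUS for errors and
   restart the whole transfer if necessary (giving up after three retries),
   chain the next SG segment if one is left, then tell the firmware how much
   was transferred and recycle the buffers. */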
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");

	del_timer(&itv->dma_timer);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
		return;

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			} else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason we must kick the firmware, as in PIO mode;
		   this presumably tells the firmware we are done and reports the
		   size of the xfer so it can calculate what we need next.
		   We could probably do this part ourselves, but we would have to
		   fully calculate the xfer info ourselves and not use interrupts. */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

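/* Handle completion of an encoder-requested DMA: on error retry from the
   first SG segment (giving up after three attempts), otherwise chain the
   next segment or post-process the data, including any VBI DMA that was
   piggybacked onto an MPEG transfer. */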
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);

	del_timer(&itv->dma_timer);

	if (itv->cur_dma_stream < 0)
		return;

	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		} else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
			read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry */
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
				data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	/* If more than two VBI buffers are pending, then
	   clear the old ones and start with this new one.
	   This can happen during transition stages when MPEG capturing is
	   started, but the first interrupts haven't arrived yet. During
	   that period VBI requests can accumulate without being able to
	   DMA the data. Since at most four VBI DMA buffers are available,
	   we just drop the old requests when there are already three
	   requests queued. */
	if (s->sg_pending_size > 2) {
		struct ivtv_buffer *buf;

		list_for_each_entry(buf, &s->q_predma.list, list)
			ivtv_buf_sync_for_cpu(s, buf);
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
		s->sg_pending_size = 0;
	}
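	/* Note that data[] is not filled in here: the VBI cases in
	   stream_enc_dma_append() compute offset and size from the VBI state
	   registers and do not use the mailbox arguments. */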
	/* if we can append the data, and the MPEG stream isn't capturing,
	   then start a DMA request for just the VBI data. */
	if (!stream_enc_dma_append(s, data) &&
			!test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */
	ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		itv->dma_data_req_size =
			1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
		itv->dma_data_req_offset = data[1];
		if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
			ivtv_yuv_frame_complete(itv);
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	} else {
		itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
			itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	} else {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
			ivtv_yuv_setup_stream_frame(itv);
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(0x28c0) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ f->sync_field) == 0 &&
			((itv->last_vsync_field & 1) ^ f->sync_field)) ||
			(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
			}
		}
	}
	if (frame != (itv->last_vsync_field & 1)) {
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		} else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
		}
		wake_up(&itv->vsync_waitq);
		if (s)
			wake_up(&s->waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
				test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
				test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
				test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if ((yi->yuv_forced_update || f->update) && last_dma_frame != -1) {
			if (!f->update) {
				last_dma_frame = (u8)(last_dma_frame - 1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}

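/* The interrupts after which the handler below checks the streams, round
   robin, for pending DMA or PIO work to start. */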
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)

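/* Main interrupt handler. Everything here runs under dma_reg_lock, so it
   must stay short; longer jobs only set IVTV_F_I_WORK_HANDLER_* bits and
   are deferred to ivtv_irq_work_handler() through the work queue. */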
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo)
		write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks.
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) != (read_reg(0x28c0) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(0x28c0) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}
		if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
			ivtv_udma_start(itv);
		}
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		queue_work(itv->irq_work_queues, &itv->irq_work_queue);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}

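/* dma_timer callback, armed by the *_start_xfer() functions above: a DMA
   that has not completed within 300 ms is assumed stuck, so clear the DMA
   state and wake up anyone waiting on it. */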
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}