return 0;
}
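+
+/*
+ * Convert as many whole, raw index entries from buf as will fit in idx,
+ * and return the number of bytes consumed from buf.
+ */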
+static int _cx18_process_idx_data(struct cx18_buffer *buf,
+ struct v4l2_enc_idx *idx)
+{
+ int consumed, remaining;
+ struct v4l2_enc_idx_entry *e_idx;
+ struct cx18_enc_idx_entry *e_buf;
+
+ /* Frame type lookup: 1=I, 2=P, 4=B */
+ const int mapping[8] = {
+ -1, V4L2_ENC_IDX_FRAME_I, V4L2_ENC_IDX_FRAME_P,
+ -1, V4L2_ENC_IDX_FRAME_B, -1, -1, -1
+ };
+
+ /*
+ * The assumption here is that a buf holds an integral number of
+ * struct cx18_enc_idx_entry objects and is properly aligned.
+ * This is enforced by the module options on IDX buffer sizes.
+ */
+ remaining = buf->bytesused - buf->readpos;
+ consumed = 0;
+ e_idx = &idx->entry[idx->entries];
+ e_buf = (struct cx18_enc_idx_entry *) &buf->buf[buf->readpos];
+
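+ /*
+ * Convert the little-endian index entries from the encoder into
+ * host-order v4l2_enc_idx_entry objects, as many as will fit.
+ */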
+ while (remaining >= sizeof(struct cx18_enc_idx_entry) &&
+ idx->entries < V4L2_ENC_IDX_ENTRIES) {
+
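+ /* 64-bit byte offset of the frame from the start of the stream */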
+ e_idx->offset = (((u64) le32_to_cpu(e_buf->offset_high)) << 32)
+ | le32_to_cpu(e_buf->offset_low);
+
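+ /* The 33-bit MPEG PTS: bit 0 of pts_high is the most significant bit */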
+ e_idx->pts = (((u64) (le32_to_cpu(e_buf->pts_high) & 1)) << 32)
+ | le32_to_cpu(e_buf->pts_low);
+
+ e_idx->length = le32_to_cpu(e_buf->length);
+
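+ /* Map the raw frame type to the corresponding V4L2 frame type flag */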
+ e_idx->flags = mapping[le32_to_cpu(e_buf->flags) & 0x7];
+
+ e_idx->reserved[0] = 0;
+ e_idx->reserved[1] = 0;
+
+ idx->entries++;
+ e_idx = &idx->entry[idx->entries];
+ e_buf++;
+
+ remaining -= sizeof(struct cx18_enc_idx_entry);
+ consumed += sizeof(struct cx18_enc_idx_entry);
+ }
+
+ /* Swallow any partial entry left at the end of the buffer */
+ if (remaining > 0 && remaining < sizeof(struct cx18_enc_idx_entry))
+ consumed += remaining;
+
+ buf->readpos += consumed;
+ return consumed;
+}
+
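+/*
+ * Walk the MDL's buffers from the current read position, converting
+ * index entries into idx, until the MDL is drained or idx is full.
+ */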
+static int cx18_process_idx_data(struct cx18_stream *s, struct cx18_mdl *mdl,
+ struct v4l2_enc_idx *idx)
+{
+ if (s->type != CX18_ENC_STREAM_TYPE_IDX)
+ return -EINVAL;
+
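+ /* Start at the first buffer in the MDL if no read is in progress */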
+ if (mdl->curr_buf == NULL)
+ mdl->curr_buf = list_first_entry(&mdl->buf_list,
+ struct cx18_buffer, list);
+
+ if (list_entry_is_past_end(mdl->curr_buf, &mdl->buf_list, list)) {
+ /*
+ * We've somehow exhausted the buffers, but the MDL object
+ * still says some data is unread.  Fix the accounting and
+ * bail out.
+ */
+ mdl->readpos = mdl->bytesused;
+ return 0;
+ }
+
+ list_for_each_entry_from(mdl->curr_buf, &mdl->buf_list, list) {
+
+ /* Skip any empty buffers in the MDL */
+ if (mdl->curr_buf->readpos >= mdl->curr_buf->bytesused)
+ continue;
+
+ mdl->readpos += _cx18_process_idx_data(mdl->curr_buf, idx);
+
+ /* Exit when the MDL is drained or the request is satisfied */
+ if (idx->entries >= V4L2_ENC_IDX_ENTRIES ||
+ mdl->curr_buf->readpos < mdl->curr_buf->bytesused ||
+ mdl->readpos >= mdl->bytesused)
+ break;
+ }
+ return 0;
+}
+
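+/*
+ * VIDIOC_G_ENC_INDEX handler: report accumulated index entries from the
+ * IDX stream's full queue back to the application.
+ */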
static int cx18_g_enc_index(struct file *file, void *fh,
struct v4l2_enc_idx *idx)
{
- return -EINVAL;
+ struct cx18 *cx = ((struct cx18_open_id *)fh)->cx;
+ struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_IDX];
+ s32 tmp;
+ struct cx18_mdl *mdl;
+
+ if (!cx18_stream_enabled(s)) /* Module options inhibited IDX stream */
+ return -EINVAL;
+
+ /* Compute the best-case number of entries we can buffer */
+ tmp = s->buffers -
+ s->bufs_per_mdl * CX18_ENC_STREAM_TYPE_IDX_FW_MDL_MIN;
+ if (tmp <= 0)
+ tmp = 1;
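+ /* Number of entries that fit in the buffer space we can count on */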
+ tmp = tmp * s->buf_size / sizeof(struct cx18_enc_idx_entry);
+
+ /* Fill out the header of the return structure */
+ idx->entries = 0;
+ idx->entries_cap = tmp;
+ memset(idx->reserved, 0, sizeof(idx->reserved));
+
+ /* Pull IDX MDLs and buffers from q_full and populate the entries */
+ do {
+ mdl = cx18_dequeue(s, &s->q_full);
+ if (mdl == NULL) /* No more IDX data right now */
+ break;
+
+ /* Extract the Index entry data from the MDL and buffers */
+ cx18_process_idx_data(s, mdl, idx);
+ if (mdl->readpos < mdl->bytesused) {
+ /* We stopped with data still unread; push the MDL back */
+ cx18_push(s, mdl, &s->q_full);
+ break;
+ }
+
+ /* We drained this MDL, schedule it to go to the firmware */
+ cx18_enqueue(s, mdl, &s->q_free);
+
+ } while (idx->entries < V4L2_ENC_IDX_ENTRIES);
+
+ /* Tell the work handler to send free IDX MDLs to the firmware */
+ cx18_stream_load_fw_queue(s);
+ return 0;
}
static int cx18_encoder_cmd(struct file *file, void *fh,