if (ret < 0)
ALOGE("%s: exynos_v4l2_s_fmt fail (%d)",__FUNCTION__, ret);
+ node->streamOn = false;
+
return ret;
}
enum v4l2_buf_type type = node->type;
int ret;
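+ // make stream-on idempotent: skip VIDIOC_STREAMON if already streaming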
+ if (node->streamOn)
+ return 0;
+
ret = exynos_v4l2_streamon(node->fd, type);
if (ret < 0)
ALOGE("%s: VIDIOC_STREAMON failed (%d)",__FUNCTION__, ret);
+ else
+ node->streamOn = true;
ALOGV("On streaming I/O... ... fd(%d)", node->fd);
enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
int ret;
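+ // make stream-off idempotent: skip VIDIOC_STREAMOFF if not streaming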
+ if (!node->streamOn)
+ return 0;
+
ALOGV("Off streaming I/O... fd(%d)", node->fd);
ret = exynos_v4l2_streamoff(node->fd, type);
if (ret < 0)
ALOGE("%s: VIDIOC_STREAMOFF failed (%d)",__FUNCTION__, ret);
+ else
+ node->streamOn = false;
return ret;
}
m_mainThread = main_thread;
for (int i=0 ; i<NUM_MAX_REQUEST_MGR_ENTRY; i++) {
memset(&(entries[i]), 0x00, sizeof(request_manager_entry_t));
- entries[i].internal_shot.ctl.request.frameCount = -1;
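+ // internal_shot is now a struct camera2_shot_ext, so ctl/dm metadata lives under its embedded .shot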
+ entries[i].internal_shot.shot.ctl.request.frameCount = -1;
}
m_sensorPipelineSkipCnt = 8;
return;
newEntry->original_request = new_request;
// TODO : allocate internal_request dynamically
m_metadataConverter->ToInternalShot(new_request, &(newEntry->internal_shot));
- newEntry->output_stream_count = newEntry->internal_shot.ctl.request.numOutputStream;
+ newEntry->output_stream_count = newEntry->internal_shot.shot.ctl.request.id; // temp: 'id' temporarily carries the stream count (see ToInternalShot)
m_numOfEntries++;
m_entryInsertionIndex = newInsertionIndex;
- // Dump();
ALOGV("## RegisterReq DONE num(%d), insert(%d), processing(%d), frame(%d), (frameCnt(%d))",
- m_numOfEntries,m_entryInsertionIndex,m_entryProcessingIndex, m_entryFrameOutputIndex, newEntry->internal_shot.ctl.request.frameCount);
+ m_numOfEntries,m_entryInsertionIndex,m_entryProcessingIndex, m_entryFrameOutputIndex, newEntry->internal_shot.shot.ctl.request.frameCount);
}
void RequestManager::DeregisterRequest(camera_metadata_t ** deregistered_request)
request_manager_entry * currentEntry = &(entries[m_entryFrameOutputIndex]);
- if (currentEntry->status!=PROCESSING) {
+ if (currentEntry->status != CAPTURED) {
ALOGD("DBG(%s): Circular buffer abnormal. processing(%d), frame(%d), status(%d) ", __FUNCTION__
, m_entryProcessingIndex, m_entryFrameOutputIndex,(int)(currentEntry->status));
return;
currentEntry->status = EMPTY;
currentEntry->original_request = NULL;
- memset(&(currentEntry->internal_shot), 0, sizeof(camera2_ctl_metadata_NEW_t));
- currentEntry->internal_shot.ctl.request.frameCount = -1;
+ memset(&(currentEntry->internal_shot), 0, sizeof(struct camera2_shot_ext));
+ currentEntry->internal_shot.shot.ctl.request.frameCount = -1;
currentEntry->output_stream_count = 0;
currentEntry->dynamic_meta_vaild = false;
m_numOfEntries--;
request_manager_entry * currentEntry2 = &(entries[tempFrameOutputIndex]);
currentEntry2->status = EMPTY;
currentEntry2->original_request = NULL;
- memset(&(currentEntry2->internal_shot), 0, sizeof(camera2_ctl_metadata_NEW_t));
- currentEntry2->internal_shot.ctl.request.frameCount = -1;
+ memset(&(currentEntry2->internal_shot), 0, sizeof(struct camera2_shot_ext));
+ currentEntry2->internal_shot.shot.ctl.request.frameCount = -1;
currentEntry2->output_stream_count = 0;
currentEntry2->dynamic_meta_vaild = false;
m_numOfEntries--;
return false;
}
- if (currentEntry->status!=PROCESSING) {
+ if (currentEntry->status != CAPTURED) {
ALOGD("DBG(%s): Circular buffer abnormal status(%d)", __FUNCTION__, (int)(currentEntry->status));
return false;
*frame_size = get_camera_metadata_size(m_tempFrameMetadata);
*prepared_frame = m_tempFrameMetadata;
ALOGV("## PrepareFrame DONE: frameOut(%d) frameCnt-req(%d)", m_entryFrameOutputIndex,
- currentEntry->internal_shot.ctl.request.frameCount);
+ currentEntry->internal_shot.shot.ctl.request.frameCount);
// Dump();
return true;
}
}
if ((m_entryProcessingIndex == m_entryInsertionIndex)
- && (entries[m_entryProcessingIndex].status == PROCESSING)) {
+ && (entries[m_entryProcessingIndex].status == REQUESTED || entries[m_entryProcessingIndex].status == CAPTURED)) {
ALOGV("## MarkProcReq skipping(request underrun) - num(%d), insert(%d), processing(%d), frame(%d)",
m_numOfEntries,m_entryInsertionIndex,m_entryProcessingIndex, m_entryFrameOutputIndex);
return -1;
newEntry = &(entries[newProcessingIndex]);
- if (newEntry->status!=REGISTERED) {
+ if (newEntry->status != REGISTERED) {
ALOGV("DEBUG(%s): Circular buffer abnormal ", __FUNCTION__);
- // Dump();
return -1;
}
- newEntry->status = PROCESSING;
+ newEntry->status = REQUESTED;
// TODO : replace the codes below with a single memcpy of pre-converted 'shot'
shot_ext = (struct camera2_shot_ext *)(buf->virt.extP[1]);
shot_ext->dnr_bypass = 1;
for (int i = 0; i < newEntry->output_stream_count; i++) {
// TODO : match with actual stream index;
- targetStreamIndex = newEntry->internal_shot.ctl.request.outputStreams[i];
+ targetStreamIndex = newEntry->internal_shot.shot.ctl.request.outputStreams[i];
if (targetStreamIndex==0) {
ALOGV("DEBUG(%s): outputstreams(%d) is for scalerP", __FUNCTION__, i);
// Dump();
ALOGV("## MarkProcReq DONE totalentry(%d), insert(%d), processing(%d), frame(%d) frameCnt(%d)",
- m_numOfEntries,m_entryInsertionIndex,m_entryProcessingIndex, m_entryFrameOutputIndex, newEntry->internal_shot.ctl.request.frameCount);
+ m_numOfEntries,m_entryInsertionIndex,m_entryProcessingIndex, m_entryFrameOutputIndex, newEntry->internal_shot.shot.ctl.request.frameCount);
return m_entryProcessingIndex;
}
}
ALOGV("DEBUG(%s): frameCnt(%d), stream_id(%d) last cnt (%d)", __FUNCTION__, frameCnt, stream_id, entries[index].output_stream_count);
- if (entries[index].output_stream_count == 0) {
- ALOGV("(%s): applying to next frame", __FUNCTION__);
- entries[GetNextIndex(index)].output_stream_count--;
- }
- else {
- entries[index].output_stream_count--; //TODO : match stream id also
- CheckCompleted(index);
- }
+ entries[index].output_stream_count--; //TODO : match stream id also
+ CheckCompleted(index);
return;
}
request_manager_entry * newEntry = &(entries[index]);
- if (newEntry->dynamic_meta_vaild) {
- ALOGV("(%s): applying to next frame", __FUNCTION__);
- newEntry = &(entries[GetNextIndex(index)]);
- newEntry->dynamic_meta_vaild = true;
- }
- else {
- newEntry->dynamic_meta_vaild = true;
- // TODO : move some code of PrepareFrame here
- CheckCompleted(index);
- }
+ newEntry->dynamic_meta_vaild = true;
+ // TODO : move some code of PrepareFrame here
+ CheckCompleted(index);
}
void RequestManager::DumpInfoWithIndex(int index)
{
- camera2_ctl_metadata_NEW_t * currMetadata = &(entries[index].internal_shot);
+ struct camera2_shot_ext * currMetadata = &(entries[index].internal_shot);
ALOGV("#### frameCount(%d) exposureTime(%lld) ISO(%d)",
- currMetadata->ctl.request.frameCount,
- currMetadata->ctl.sensor.exposureTime,
- currMetadata->ctl.sensor.sensitivity);
- if (currMetadata->ctl.request.numOutputStream==0)
+ currMetadata->shot.ctl.request.frameCount,
+ currMetadata->shot.ctl.sensor.exposureTime,
+ currMetadata->shot.ctl.sensor.sensitivity);
+ if (currMetadata->shot.ctl.request.id==0)
ALOGV("#### No output stream selected");
- else if (currMetadata->ctl.request.numOutputStream==1)
- ALOGV("#### OutputStreamId : %d", currMetadata->ctl.request.outputStreams[0]);
- else if (currMetadata->ctl.request.numOutputStream==2)
- ALOGV("#### OutputStreamId : %d, %d", currMetadata->ctl.request.outputStreams[0],
- currMetadata->ctl.request.outputStreams[1]);
+ else if (currMetadata->shot.ctl.request.id==1)
+ ALOGV("#### OutputStreamId : %d", currMetadata->shot.ctl.request.outputStreams[0]);
+ else if (currMetadata->shot.ctl.request.id==2)
+ ALOGV("#### OutputStreamId : %d, %d", currMetadata->shot.ctl.request.outputStreams[0],
+ currMetadata->shot.ctl.request.outputStreams[1]);
else
- ALOGV("#### OutputStream num (%d) abnormal ", currMetadata->ctl.request.numOutputStream);
+ ALOGV("#### OutputStream num (%d) abnormal ", currMetadata->shot.ctl.request.id);
}
void RequestManager::UpdateOutputStreamInfo(struct camera2_shot_ext *shot_ext, int frameCnt)
for (int i = 0; i < newEntry->output_stream_count; i++) {
// TODO : match with actual stream index;
- targetStreamIndex = newEntry->internal_shot.ctl.request.outputStreams[i];
+ targetStreamIndex = newEntry->internal_shot.shot.ctl.request.outputStreams[i];
if (targetStreamIndex==0) {
ALOGV("DEBUG(%s): outputstreams item[%d] is for scalerP", __FUNCTION__, i);
int RequestManager::FindEntryIndexByFrameCnt(int frameCnt)
{
for (int i = 0 ; i < NUM_MAX_REQUEST_MGR_ENTRY ; i++) {
- if (entries[i].internal_shot.ctl.request.frameCount == frameCnt)
+ if (entries[i].internal_shot.shot.ctl.request.frameCount == frameCnt)
return i;
}
return -1;
}
request_manager_entry * currentEntry = &(entries[index]);
- currentEntry->internal_shot.dm.sensor.timeStamp = *((uint64_t*)frameTime);
+ currentEntry->internal_shot.shot.dm.sensor.timeStamp = *((uint64_t*)frameTime);
ALOGV("DEBUG(%s): applied timestamp for reqIndex(%d) frameCnt(%d) (%lld)", __FUNCTION__,
- index, frameCnt, currentEntry->internal_shot.dm.sensor.timeStamp);
+ index, frameCnt, currentEntry->internal_shot.shot.dm.sensor.timeStamp);
}
uint64_t RequestManager::GetTimestamp(int frameCnt)
}
request_manager_entry * currentEntry = &(entries[index]);
- uint64_t frameTime = currentEntry->internal_shot.dm.sensor.timeStamp;
+ uint64_t frameTime = currentEntry->internal_shot.shot.dm.sensor.timeStamp;
ALOGV("DEBUG(%s): Returning timestamp for reqIndex(%d) (%lld)", __FUNCTION__, index, frameTime);
return frameTime;
}
int RequestManager::FindFrameCnt(struct camera2_shot_ext * shot_ext)
{
- int tempIndex;
+ int tempIndex, i;
if (m_sensorPipelineSkipCnt > 0) {
m_sensorPipelineSkipCnt--;
return -1;
}
-/*
- * tempIndex = GetNextIndex(tempIndex);
- * return entries[tempIndex].internal_shot.ctl.request.frameCount;
- * */
+ if (m_numOfEntries == 0) {
+ ALOGD("(%s): No Entry found", __FUNCTION__);
+ return -1;
+ }
tempIndex = GetNextIndex(m_entryFrameOutputIndex);
- return entries[tempIndex].internal_shot.ctl.request.frameCount;
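+ // walk the ring buffer starting just past the last output slot: promote the
+ // first REQUESTED entry to CAPTURED and match this sensor frame to it;
+ // entries already CAPTURED are still in flight, so scan past them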
+ for (i = 0 ; i < NUM_MAX_REQUEST_MGR_ENTRY ; i++) {
+ if (entries[tempIndex].status == REQUESTED) {
+ entries[tempIndex].status = CAPTURED;
+ return entries[tempIndex].internal_shot.shot.ctl.request.frameCount;
+ }
+ else if (entries[tempIndex].status == CAPTURED) {
+ tempIndex = GetNextIndex(tempIndex);
+ continue;
+ }
+ else {
+ ALOGE("(%s): enry state abnormal status(%d)", __FUNCTION__, entries[tempIndex].status);
+ Dump();
+ return -1;
+ }
+ }
+ return -1;
}
void RequestManager::SetInitialSkip(int count)
for (i = 0 ; i < NUM_MAX_REQUEST_MGR_ENTRY ; i++) {
currentEntry = &(entries[i]);
ALOGV("[%2d] status[%d] frameCnt[%3d] numOutput[%d]", i,
- currentEntry->status, currentEntry->internal_shot.ctl.request.frameCount,
+ currentEntry->status, currentEntry->internal_shot.shot.ctl.request.frameCount,
currentEntry->output_stream_count);
}
}
m_scp_closing(false),
m_scp_closed(false),
m_halDevice(dev),
- m_sensor_drop(false),
- m_cameraId(0)
+ m_cameraId(cameraId)
{
ALOGV("DEBUG(%s):", __FUNCTION__);
int ret = 0;
if(m_ionCameraClient == 0)
ALOGE("ERR(%s):Fail on ion_client_create", __FUNCTION__);
- m_cameraId = cameraId;
m_BayerManager = new BayerBufManager();
m_mainThread = new MainThread(this);
newParameters.nodeFormat = HAL_PIXEL_FORMAT_2_V4L2_PIX(*format_actual);
newParameters.streamOps = stream_ops;
newParameters.usage = *usage;
- newParameters.numHwBuffers = *max_buffers;
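+ // decouple the HAL-side hw buffer count (fixed at 8) from the number of
+ // service-owned buffers reported through *max_buffers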
+ newParameters.numHwBuffers = 8;
+ newParameters.numOwnSvcBuffers = *max_buffers;
newParameters.fd = m_fd_scp;
newParameters.nodePlanes = 3;
newParameters.svcPlanes = 3;
newParameters.halBuftype = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
newParameters.memory = V4L2_MEMORY_DMABUF;
newParameters.ionClient = m_ionCameraClient;
+ newParameters.numSvcBufsInHal = 0;
AllocatedStream->m_index = *stream_id;
AllocatedStream->setParameter(&newParameters);
AllocatedStream->m_activated = true;
recordParameters.svcPlanes = 1;
recordParameters.streamOps = stream_ops;
recordParameters.usage = *usage;
- recordParameters.numBufsInHal = 0;
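+ // numSvcBufsInHal tracks how many service-owned recording buffers the HAL currently holds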
+ recordParameters.numOwnSvcBuffers = *max_buffers;
+ recordParameters.numSvcBufsInHal = 0;
parentStream->setRecordingParameter(&recordParameters);
m_scp_flushing = false;
newParameters.nodeFormat = V4L2_PIX_FMT_YUYV;
newParameters.streamOps = stream_ops;
newParameters.usage = *usage;
- newParameters.numHwBuffers = *max_buffers;
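+ // same decoupling of HAL and service buffer counts as for the stream above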
+ newParameters.numHwBuffers = 8;
+ newParameters.numOwnSvcBuffers = *max_buffers;
newParameters.fd = fd;
newParameters.nodePlanes = 1;
newParameters.svcPlanes = 1;
newParameters.halBuftype = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
newParameters.memory = V4L2_MEMORY_DMABUF;
newParameters.ionClient = m_ionCameraClient;
+ newParameters.numSvcBufsInHal = 0;
AllocatedStream->m_index = *stream_id;
AllocatedStream->setParameter(&newParameters);
return 0;
currentBuf.fd.extFd[0] = priv_handle->fd;
currentBuf.fd.extFd[1] = priv_handle->fd1;
currentBuf.fd.extFd[2] = priv_handle->fd2;
- ALOGV("DEBUG(%s): yddr(%x), uoffset(%d), voffset(%d)", __FUNCTION__,priv_handle->yaddr, priv_handle->uoffset, priv_handle->voffset);
ALOGV("DEBUG(%s): ion_size(%d), stride(%d), ", __FUNCTION__,priv_handle->size, priv_handle->stride);
for (plane_index=0 ; plane_index < targetRecordParms->svcPlanes ; plane_index++) {
currentBuf.virt.extP[plane_index] = (char *)virtAddr[plane_index];
}
else {
ALOGE("ERR(%s) unregisterd stream id (%d)", __FUNCTION__, stream_id);
- return 1; // TODO : proper error code?
+ return 1;
}
- if (targetStreamParms->streamType ==0) {
+ if (targetStreamParms->streamType == 0) {
if (num_buffers < targetStreamParms->numHwBuffers) {
ALOGE("ERR(%s) registering insufficient num of buffers (%d) < (%d)",
__FUNCTION__, num_buffers, targetStreamParms->numHwBuffers);
- return 1; // TODO : proper error code?
+ return 1;
}
}
ALOGV("DEBUG(%s): format(%x) width(%d), height(%d) svcPlanes(%d)",
currentBuf.fd.extFd[0] = priv_handle->fd;
currentBuf.fd.extFd[2] = priv_handle->fd1;
currentBuf.fd.extFd[1] = priv_handle->fd2;
- ALOGV("DEBUG(%s): yddr(%x), uoffset(%d), voffset(%d)", __FUNCTION__,priv_handle->yaddr, priv_handle->uoffset, priv_handle->voffset);
ALOGV("DEBUG(%s): ion_size(%d), stride(%d), ", __FUNCTION__,priv_handle->size, priv_handle->stride);
- for (plane_index=0 ; plane_index < v4l2_buf.length ; plane_index++) {
+ for (plane_index = 0 ; plane_index < v4l2_buf.length ; plane_index++) {
currentBuf.virt.extP[plane_index] = (char *)virtAddr[plane_index];
v4l2_buf.m.planes[plane_index].length = currentBuf.size.extS[plane_index];
ALOGV("DEBUG(%s): plane(%d): fd(%d) addr(%x), length(%d)",
}
else {
ALOGE("ERR:(%s): wrong stream id (%d)", __FUNCTION__, stream_id);
- return 1; // TODO : proper error code?
+ return 1;
}
targetStream->m_releasing = true;
shot_ext->shot.dm.sensor.exposureTime,
shot_ext->shot.dm.sensor.frameDuration,
shot_ext->shot.dm.sensor.sensitivity,
-// shot_ext->shot.dm.sensor.frameCount,
shot_ext->shot.dm.sensor.timeStamp);
}
if (currentSignal & SIGNAL_THREAD_RELEASE) {
ALOGD("(%s): ENTER processing SIGNAL_THREAD_RELEASE", __FUNCTION__);
-#if 0 // TODO
- for (int i = 0 ; i < NUM_BAYER_BUFFERS ; i++) {
- ALOGV("DEBUG(%s):### BayerIndex[%d] Status (%d)", __FUNCTION__, i, m_bayerBufStatus[i]);
- if (m_bayerBufStatus[i]==BAYER_ON_SENSOR) {
- bayersOnSensor++;
- }
- else if (m_bayerBufStatus[i]==BAYER_ON_ISP) {
- bayersOnIsp++;
- }
- }
- for (int i = 0 ; i < bayersOnSensor ; i++) {
- index = cam_int_dqbuf(&(m_camera_info.sensor));
- ALOGV("DEBUG(%s):### sensor dqbuf done index(%d)", __FUNCTION__, index);
- m_bayerBufStatus[index] = BAYER_ON_HAL_EMPTY;
- }
- for (int i = 0 ; i < bayersOnIsp ; i++) {
- index = cam_int_dqbuf(&(m_camera_info.isp));
- ALOGV("DEBUG(%s):### isp dqbuf done index(%d)", __FUNCTION__, index);
- m_bayerBufStatus[index] = BAYER_ON_HAL_EMPTY;
- }
-
- for (int i = 0 ; i < NUM_BAYER_BUFFERS ; i++) {
- ALOGV("DEBUG(%s):### Bayer Buf[%d] Status (%d)", __FUNCTION__, i, m_bayerBufStatus[i]);
- }
-#endif
ALOGV("(%s): calling sensor streamoff", __FUNCTION__);
cam_int_streamoff(&(m_camera_info.sensor));
ALOGV("(%s): calling sensor streamoff done", __FUNCTION__);
index = cam_int_dqbuf(&(m_camera_info.sensor));
frameTime = systemTime();
ALOGV("### Sensor DQBUF done BayerIndex(%d)", index);
- bool wait = false;
shot_ext = (struct camera2_shot_ext *)(m_camera_info.sensor.buffer[index].virt.extP[1]);
matchedFrameCnt = m_requestManager->FindFrameCnt(shot_ext);
- ALOGV("### Matched(%d) last(%d)", matchedFrameCnt, lastFrameCnt);
-#if 1
+ ALOGV("### Matched(%d) last(%d), dqbuf timestamp(%lld)", matchedFrameCnt, lastFrameCnt
+ , shot_ext->shot.dm.sensor.timeStamp);
if (matchedFrameCnt != -1) {
while (matchedFrameCnt == lastFrameCnt) {
m_BayerManager->MarkSensorDequeue(index, -1, &frameTime);
index = cam_int_dqbuf(&(m_camera_info.sensor));
frameTime = systemTime();
ALOGV("### Sensor DQBUF done BayerIndex(%d)", index);
- bool wait = false;
shot_ext = (struct camera2_shot_ext *)(m_camera_info.sensor.buffer[index].virt.extP[1]);
matchedFrameCnt = m_requestManager->FindFrameCnt(shot_ext);
ALOGV("### Matched(%d) last(%d)", matchedFrameCnt, lastFrameCnt);
}
lastFrameCnt = matchedFrameCnt;
- }
-#else
- if (m_sensor_drop) {
- matchedFrameCnt = -1;
- m_sensor_drop = false;
- }
- else if (matchedFrameCnt != -1) {
- if (matchedFrameCnt == lastFrameCnt) {
- m_sensor_drop = true;
- matchedFrameCnt++;
- }
- lastFrameCnt = matchedFrameCnt;
m_scp_closing = false;
m_scp_closed = false;
}
-#endif
m_BayerManager->MarkSensorDequeue(index, matchedFrameCnt, &frameTime);
m_requestManager->RegisterTimestamp(matchedFrameCnt, &frameTime);
if (!m_closing){
selfThread->SetSignal(SIGNAL_SENSOR_START_REQ_PROCESSING);
}
- /*if (wait) {
- ALOGE("###waiting###");
- usleep(20000);
- }*/
return;
}
return;
if (processingFrameCnt != -1) {
ALOGV("### writing output stream info");
m_requestManager->UpdateOutputStreamInfo(shot_ext, processingFrameCnt);
- DumpInfoWithShot(shot_ext);
}
else {
memcpy(shot_ext, &(m_camera_info.dummy_shot), sizeof(struct camera2_shot_ext));
shot_ext->request_scp = 0;
shot_ext->request_sensor = 0;
}
- //if (m_sensor_drop)
- // usleep(25000);
cam_int_qbuf(&(m_camera_info.isp), bayerIndexToEnqueue);
ALOGV("### isp QBUF done bayerIndex[%d] scp(%d)", bayerIndexToEnqueue, shot_ext->request_scp);
m_BayerManager->MarkIspEnqueue(bayerIndexToEnqueue);
else
m_scp_closed = false;
if (processingFrameCnt != -1) {
- DumpInfoWithShot(shot_ext);
m_requestManager->ApplyDynamicMetadata(shot_ext, processingFrameCnt);
}
m_BayerManager->MarkIspDequeue(index);
ALOGV("DBG(%s): buffer status abnormal (%d) "
, __FUNCTION__, selfStreamParms->svcBufStatus[index]);
}
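+ // the HAL now holds one more service-owned buffer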
+ selfStreamParms->numSvcBufsInHal++;
if (*buf != selfStreamParms->svcBufHandle[index])
ALOGV("DBG(%s): different buf_handle index ", __FUNCTION__);
else
ALOGV("DEBUG(%s): same buf_handle index", __FUNCTION__);
}
- m_svcBufIndex = 0;
+ selfStreamParms->svcBufIndex = 0;
}
selfThread->m_isBufferInit = true;
}
m_resizeBuf2.size.extS[1] = 0;
m_resizeBuf2.size.extS[2] = 0;
ALOGV("DEBUG(%s): resizebuf2 size0(%d) size1(%d)", __FUNCTION__, m_resizeBuf2.size.extS[0], m_resizeBuf2.size.extS[1]);
- if (allocCameraMemory(selfStreamParms->ionClient, &m_resizeBuf2, 1) == -1) {
+ if (allocCameraMemory(selfStreamParms->ionClient, &m_resizeBuf2, selfRecordParms->svcPlanes) == -1) {
ALOGE("ERR(%s): Failed to allocate resize buf2", __FUNCTION__);
}
ALOGE("ERR(%s): Init: unable to dequeue buffer : %d",__FUNCTION__ , res);
return;
}
- selfRecordParms->numBufsInHal++;
+ selfRecordParms->numSvcBufsInHal++;
ALOGV("DEBUG(%s): [record] got buf(%x) bufInHal(%d) version(%d), numFds(%d), numInts(%d)", __FUNCTION__, (uint32_t)(*buf),
- selfRecordParms->numBufsInHal, ((native_handle_t*)(*buf))->version, ((native_handle_t*)(*buf))->numFds, ((native_handle_t*)(*buf))->numInts);
+ selfRecordParms->numSvcBufsInHal, ((native_handle_t*)(*buf))->version, ((native_handle_t*)(*buf))->numFds, ((native_handle_t*)(*buf))->numInts);
if (m_grallocHal->lock(m_grallocHal, *buf,
selfRecordParms->usage, 0, 0,
}
found = false;
for (checkingIndex = 0; checkingIndex < selfRecordParms->numSvcBuffers ; checkingIndex++) {
- //ALOGV("DEBUG(%s) : comparing %d %x %x", __FUNCTION__, checkingIndex,
- //selfRecordParms->svcBufHandle[checkingIndex], *buf);
if (selfRecordParms->svcBufHandle[checkingIndex] == *buf ) {
found = true;
break;
else
ALOGV("DEBUG(%s): same buf_handle index", __FUNCTION__);
}
- selfRecordParms->m_svcBufIndex = 0;
+ selfRecordParms->svcBufIndex = 0;
}
m_needsRecordBufferInit = false;
}
selfStreamParms->svcBufStatus[index] = ON_HAL;
if (m_recordOutput && m_recordingEnabled) {
- ALOGV("DEBUG(%s): Entering record frame creator, index(%d)",__FUNCTION__, selfRecordParms->m_svcBufIndex);
+ ALOGV("DEBUG(%s): Entering record frame creator, index(%d)",__FUNCTION__, selfRecordParms->svcBufIndex);
bool found = false;
- for (int i = 0 ; selfRecordParms->numSvcBuffers ; i++) {
+ for (int i = 0 ; i < selfRecordParms->numSvcBuffers ; i++) {
- if (selfRecordParms->svcBufStatus[selfRecordParms->m_svcBufIndex] == ON_HAL) {
+ if (selfRecordParms->svcBufStatus[selfRecordParms->svcBufIndex] == ON_HAL) {
found = true;
break;
}
- selfRecordParms->m_svcBufIndex++;
- if (selfRecordParms->m_svcBufIndex >= selfRecordParms->numSvcBuffers)
- selfRecordParms->m_svcBufIndex = 0;
+ selfRecordParms->svcBufIndex++;
+ if (selfRecordParms->svcBufIndex >= selfRecordParms->numSvcBuffers)
+ selfRecordParms->svcBufIndex = 0;
}
if (!found) {
ALOGE("(%s): cannot find free recording buffer", __FUNCTION__);
- selfRecordParms->m_svcBufIndex++;
+ selfRecordParms->svcBufIndex++;
break;
}
csc_set_src_format(m_exynosVideoCSC,
- //ALIGN(previewW, 32), ALIGN(previewH, 32),
previewW, previewH,
cropX, cropY, cropW, cropH,
HAL_PIXEL_FORMAT_YV12,
csc_set_dst_format(m_exynosVideoCSC,
ALIGN(videoW, 32), ALIGN(videoH, 32),
0, 0, videoW, videoH,
- HAL_PIXEL_FORMAT_RGBA_8888,
+ selfRecordParms->outputFormat,
1);
- ALOGV("DEBUG(%s) [1]-- bufindex(%d)", __FUNCTION__, selfRecordParms->m_svcBufIndex);
+ ALOGV("DEBUG(%s) [1]-- bufindex(%d)", __FUNCTION__, selfRecordParms->svcBufIndex);
csc_set_src_buffer(m_exynosVideoCSC,
(void **)(&(selfStreamParms->svcBuffers[index].fd.fd)));
for (int i=0 ; i <3 ; i++)
ALOGV("DEBUG(%s): src [%d] - %d, %x size(%d)",
__FUNCTION__, i, selfStreamParms->svcBuffers[index].fd.extFd[i],
- selfStreamParms->svcBuffers[index].virt.extP[i],
+ (unsigned int)selfStreamParms->svcBuffers[index].virt.extP[i],
selfStreamParms->svcBuffers[index].size.extS[i]);
- //m_resizeBuf2.fd.extFd[2] = 0;
for (int i=0 ; i <selfRecordParms->svcPlanes; i++)
ALOGV("DEBUG(%s): m_resizeBuf2.fd.extFd[%d]=%d addr(%x) m_resizeBuf2.size.extS[%d]=%d",
__FUNCTION__, i, m_resizeBuf2.fd.extFd[i], (unsigned int)m_resizeBuf2.virt.extP[i], i,
ALOGE("ERR(%s):csc_convert() fail", __FUNCTION__);
}
else {
- ALOGV("ERR(%s):csc_convert() SUCCESS", __FUNCTION__);
+ ALOGV("(%s):csc_convert() SUCCESS", __FUNCTION__);
}
- /*tempFd = selfStreamParms->svcBuffers[index].fd.extFd[2];
- selfStreamParms->svcBuffers[index].fd.extFd[2] = selfStreamParms->svcBuffers[index].fd.extFd[1];
- selfStreamParms->svcBuffers[index].fd.extFd[1] = tempFd; */
ALOGV("DEBUG(%s): svc addr[0] %x addr[1] %x", __FUNCTION__,
- (unsigned int)selfRecordParms->svcBuffers[selfRecordParms->m_svcBufIndex].virt.extP[0],
- (unsigned int)selfRecordParms->svcBuffers[selfRecordParms->m_svcBufIndex].virt.extP[1]);
- memcpy(selfRecordParms->svcBuffers[selfRecordParms->m_svcBufIndex].virt.extP[0],
+ (unsigned int)selfRecordParms->svcBuffers[selfRecordParms->svcBufIndex].virt.extP[0],
+ (unsigned int)selfRecordParms->svcBuffers[selfRecordParms->svcBufIndex].virt.extP[1]);
+ memcpy(selfRecordParms->svcBuffers[selfRecordParms->svcBufIndex].virt.extP[0],
m_resizeBuf2.virt.extP[0], videoW * videoH * 4);
}
else {
ALOGE("ERR(%s):m_exynosVideoCSC == NULL", __FUNCTION__);
}
- /* res = selfRecordParms->streamOps->enqueue_buffer(selfRecordParms->streamOps,
- m_requestManager->GetTimestamp(m_ispProcessingFrameCnt),
- &(selfRecordParms->svcBufHandle[selfRecordParms->m_svcBufIndex]));*/
res = selfRecordParms->streamOps->enqueue_buffer(selfRecordParms->streamOps,
- systemTime(),
- &(selfRecordParms->svcBufHandle[selfRecordParms->m_svcBufIndex]));
+ m_requestManager->GetTimestamp(m_ispProcessingFrameCnt),
+ &(selfRecordParms->svcBufHandle[selfRecordParms->svcBufIndex]));
ALOGV("DEBUG(%s): stream(%d) record enqueue_buffer to svc done res(%d)", __FUNCTION__,
selfThread->m_index, res);
if (res == 0) {
- selfRecordParms->svcBufStatus[selfRecordParms->m_svcBufIndex] = ON_SERVICE;
- selfRecordParms->numBufsInHal--;
+ selfRecordParms->svcBufStatus[selfRecordParms->svcBufIndex] = ON_SERVICE;
+ selfRecordParms->numSvcBufsInHal--;
}
- /*selfRecordParms->m_svcBufIndex++;
- if (selfRecordParms->m_svcBufIndex >= selfRecordParms->numSvcBuffers)
- selfRecordParms->m_svcBufIndex = 0;*/
+
m_requestManager->NotifyStreamOutput(m_ispProcessingFrameCnt, 2);
}
}
if (res == 0) {
selfStreamParms->svcBufStatus[index] = ON_SERVICE;
+ selfStreamParms->numSvcBufsInHal--;
}
else {
selfStreamParms->svcBufStatus[index] = ON_HAL;
ALOGV("DEBUG(%s): jpegBuf.size.s = %d , jpegBuf.virt.p = %x", __FUNCTION__,
jpegBuf.size.s, (unsigned int)jpegBuf.virt.p);
-
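+ // moved up: report the stream output before JPEG encoding rather than after the enqueue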
+ m_requestManager->NotifyStreamOutput(m_jpegEncodingFrameCnt, selfThread->m_index);
if (yuv2Jpeg(&m_resizeBuf, &jpegBuf, &jpegRect) == false)
ALOGE("ERR(%s):yuv2Jpeg() fail", __FUNCTION__);
cam_int_qbuf(&(selfStreamParms->node), index);
m_resizeBuf = resizeBufInfo;
for (int i = 0; i < selfStreamParms->numSvcBuffers ; i++) {
- if (selfStreamParms->svcBufStatus[m_svcBufIndex] == ON_HAL) {
+ if (selfStreamParms->svcBufStatus[selfStreamParms->svcBufIndex] == ON_HAL) {
found = true;
break;
}
- m_svcBufIndex++;
- if (m_svcBufIndex >= selfStreamParms->numSvcBuffers) m_svcBufIndex = 0;
+ selfStreamParms->svcBufIndex++;
+ if (selfStreamParms->svcBufIndex >= selfStreamParms->numSvcBuffers)
+ selfStreamParms->svcBufIndex = 0;
}
if (!found) {
ALOGE("ERR(%s): NO free SVC buffer for JPEG", __FUNCTION__);
}
else {
- memcpy(selfStreamParms->svcBuffers[m_svcBufIndex].virt.extP[0], jpegBuf.virt.extP[0], 5*1024*1024);
+ memcpy(selfStreamParms->svcBuffers[selfStreamParms->svcBufIndex].virt.extP[0], jpegBuf.virt.extP[0], 5*1024*1024);
res = selfStreamParms->streamOps->enqueue_buffer(selfStreamParms->streamOps,
- m_requestManager->GetTimestamp(m_jpegEncodingFrameCnt), &(selfStreamParms->svcBufHandle[m_svcBufIndex]));
+ m_requestManager->GetTimestamp(m_jpegEncodingFrameCnt), &(selfStreamParms->svcBufHandle[selfStreamParms->svcBufIndex]));
freeCameraMemory(&jpegBuf, 1);
ALOGV("DEBUG(%s): stream(%d) enqueue_buffer index(%d) to svc done res(%d)",
- __FUNCTION__, selfThread->m_index, m_svcBufIndex, res);
+ __FUNCTION__, selfThread->m_index, selfStreamParms->svcBufIndex, res);
if (res == 0) {
- selfStreamParms->svcBufStatus[m_svcBufIndex] = ON_SERVICE;
+ selfStreamParms->svcBufStatus[selfStreamParms->svcBufIndex] = ON_SERVICE;
+ selfStreamParms->numSvcBufsInHal--;
}
else {
- selfStreamParms->svcBufStatus[m_svcBufIndex] = ON_HAL;
+ selfStreamParms->svcBufStatus[selfStreamParms->svcBufIndex] = ON_HAL;
}
- m_requestManager->NotifyStreamOutput(m_jpegEncodingFrameCnt, selfThread->m_index);
+
}
}
if (selfStreamParms->streamType==0 && m_recordOutput && m_recordingEnabled) {
do {
- ALOGV("DEBUG(%s): record currentBuf#(%d)", __FUNCTION__ , selfRecordParms->numBufsInHal);
- if (selfRecordParms->numBufsInHal>=1)
+ ALOGV("DEBUG(%s): record currentBuf#(%d)", __FUNCTION__ , selfRecordParms->numSvcBufsInHal);
+ if (selfRecordParms->numSvcBufsInHal >= 1)
{
ALOGV("DEBUG(%s): breaking", __FUNCTION__);
break;
ALOGV("DEBUG(%s): record stream(%d) dequeue_buffer fail res(%d)",__FUNCTION__ , selfThread->m_index, res);
break;
}
- selfRecordParms->numBufsInHal ++;
+ selfRecordParms->numSvcBufsInHal ++;
ALOGV("DEBUG(%s): record got buf(%x) numBufInHal(%d) version(%d), numFds(%d), numInts(%d)", __FUNCTION__, (uint32_t)(*buf),
- selfRecordParms->numBufsInHal, ((native_handle_t*)(*buf))->version, ((native_handle_t*)(*buf))->numFds, ((native_handle_t*)(*buf))->numInts);
+ selfRecordParms->numSvcBufsInHal, ((native_handle_t*)(*buf))->version, ((native_handle_t*)(*buf))->numFds, ((native_handle_t*)(*buf))->numInts);
const private_handle_t *priv_handle = reinterpret_cast<const private_handle_t *>(*buf);
bool found = false;
}
} while (0);
}
- while(1) {
- res = selfStreamParms->streamOps->dequeue_buffer(selfStreamParms->streamOps, &buf);
- if (res != NO_ERROR || buf == NULL) {
- ALOGV("DEBUG(%s): stream(%d) dequeue_buffer fail res(%d)",__FUNCTION__ , selfThread->m_index, res);
- break;
- }
-
- ALOGV("DEBUG(%s): got buf(%x) version(%d), numFds(%d), numInts(%d)", __FUNCTION__, (uint32_t)(*buf),
- ((native_handle_t*)(*buf))->version, ((native_handle_t*)(*buf))->numFds, ((native_handle_t*)(*buf))->numInts);
- const private_handle_t *priv_handle = reinterpret_cast<const private_handle_t *>(*buf);
-
- bool found = false;
- int checkingIndex = 0;
- for (checkingIndex = 0; checkingIndex < selfStreamParms->numSvcBuffers ; checkingIndex++) {
- if (priv_handle->fd == selfStreamParms->svcBuffers[checkingIndex].fd.extFd[0] ) {
- found = true;
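+ // refill loop: keep dequeueing from the service until the HAL holds all of
+ // its service buffers again; type 0 (preview) buffers are queued back to
+ // the driver node, type 1 buffers are simply reclaimed (see below)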
+ if (selfStreamParms->streamType == 0) {
+ while (selfStreamParms->numSvcBufsInHal < selfStreamParms->numOwnSvcBuffers) {
+ res = selfStreamParms->streamOps->dequeue_buffer(selfStreamParms->streamOps, &buf);
+ if (res != NO_ERROR || buf == NULL) {
+ ALOGV("DEBUG(%s): stream(%d) dequeue_buffer fail res(%d)",__FUNCTION__ , selfThread->m_index, res);
break;
}
- }
- ALOGV("DEBUG(%s): post_dequeue_buffer found(%d)", __FUNCTION__, found);
- if (!found) break;
- ALOGV("DEBUG(%s): preparing to qbuf [%d]", __FUNCTION__, checkingIndex);
- index = checkingIndex;
- if (index < selfStreamParms->numHwBuffers) {
- uint32_t plane_index = 0;
- ExynosBuffer* currentBuf = &(selfStreamParms->svcBuffers[index]);
- struct v4l2_buffer v4l2_buf;
- struct v4l2_plane planes[VIDEO_MAX_PLANES];
-
- v4l2_buf.m.planes = planes;
- v4l2_buf.type = currentNode->type;
- v4l2_buf.memory = currentNode->memory;
- v4l2_buf.index = index;
- v4l2_buf.length = currentNode->planes;
+ selfStreamParms->numSvcBufsInHal++;
+ ALOGV("DEBUG(%s): stream(%d) got buf(%x) numInHal(%d) version(%d), numFds(%d), numInts(%d)", __FUNCTION__,
+ selfThread->m_index, (uint32_t)(*buf), selfStreamParms->numSvcBufsInHal,
+ ((native_handle_t*)(*buf))->version, ((native_handle_t*)(*buf))->numFds, ((native_handle_t*)(*buf))->numInts);
+ const private_handle_t *priv_handle = reinterpret_cast<const private_handle_t *>(*buf);
- v4l2_buf.m.planes[0].m.fd = priv_handle->fd;
- v4l2_buf.m.planes[2].m.fd = priv_handle->fd1;
- v4l2_buf.m.planes[1].m.fd = priv_handle->fd2;
- for (plane_index=0 ; plane_index < v4l2_buf.length ; plane_index++) {
- v4l2_buf.m.planes[plane_index].length = currentBuf->size.extS[plane_index];
- ALOGV("DEBUG(%s): plane(%d): fd(%d) length(%d)",
- __FUNCTION__, plane_index, v4l2_buf.m.planes[plane_index].m.fd,
- v4l2_buf.m.planes[plane_index].length);
+ bool found = false;
+ int checkingIndex = 0;
+ for (checkingIndex = 0; checkingIndex < selfStreamParms->numSvcBuffers ; checkingIndex++) {
+ if (priv_handle->fd == selfStreamParms->svcBuffers[checkingIndex].fd.extFd[0] ) {
+ found = true;
+ break;
+ }
}
-
- if (selfStreamParms->streamType == 0) {
+ ALOGV("DEBUG(%s): post_dequeue_buffer found(%d)", __FUNCTION__, found);
+ if (!found) break;
+ ALOGV("DEBUG(%s): preparing to qbuf [%d]", __FUNCTION__, checkingIndex);
+ index = checkingIndex;
+ if (index < selfStreamParms->numHwBuffers) {
+ uint32_t plane_index = 0;
+ ExynosBuffer* currentBuf = &(selfStreamParms->svcBuffers[index]);
+ struct v4l2_buffer v4l2_buf;
+ struct v4l2_plane planes[VIDEO_MAX_PLANES];
+
+ v4l2_buf.m.planes = planes;
+ v4l2_buf.type = currentNode->type;
+ v4l2_buf.memory = currentNode->memory;
+ v4l2_buf.index = index;
+ v4l2_buf.length = currentNode->planes;
+
+ v4l2_buf.m.planes[0].m.fd = priv_handle->fd;
+ v4l2_buf.m.planes[2].m.fd = priv_handle->fd1;
+ v4l2_buf.m.planes[1].m.fd = priv_handle->fd2;
+ for (plane_index=0 ; plane_index < v4l2_buf.length ; plane_index++) {
+ v4l2_buf.m.planes[plane_index].length = currentBuf->size.extS[plane_index];
+ ALOGV("DEBUG(%s): plane(%d): fd(%d) length(%d)",
+ __FUNCTION__, plane_index, v4l2_buf.m.planes[plane_index].m.fd,
+ v4l2_buf.m.planes[plane_index].length);
+ }
if (exynos_v4l2_qbuf(currentNode->fd, &v4l2_buf) < 0) {
ALOGE("ERR(%s): stream id(%d) exynos_v4l2_qbuf() fail",
__FUNCTION__, selfThread->m_index);
ALOGV("DEBUG(%s): stream id(%d) type0 QBUF done index(%d)",
__FUNCTION__, selfThread->m_index, index);
}
- else if (selfStreamParms->streamType == 1) {
- selfStreamParms->svcBufStatus[index] = ON_HAL;
- ALOGV("DEBUG(%s): stream id(%d) type1 DQBUF done index(%d)",
- __FUNCTION__, selfThread->m_index, index);
+ }
+ }
+ else if (selfStreamParms->streamType == 1) {
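+ // type 1 streams: no driver qbuf needed, just reclaim ownership as ON_HAL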
+ while (selfStreamParms->numSvcBufsInHal < selfStreamParms->numOwnSvcBuffers) {
+ res = selfStreamParms->streamOps->dequeue_buffer(selfStreamParms->streamOps, &buf);
+ if (res != NO_ERROR || buf == NULL) {
+ ALOGV("DEBUG(%s): stream(%d) dequeue_buffer fail res(%d)",__FUNCTION__ , selfThread->m_index, res);
+ break;
}
+
+ ALOGV("DEBUG(%s): stream(%d) got buf(%x) numInHal(%d) version(%d), numFds(%d), numInts(%d)", __FUNCTION__,
+ selfThread->m_index, (uint32_t)(*buf), selfStreamParms->numSvcBufsInHal,
+ ((native_handle_t*)(*buf))->version, ((native_handle_t*)(*buf))->numFds, ((native_handle_t*)(*buf))->numInts);
+
+ const private_handle_t *priv_handle = reinterpret_cast<const private_handle_t *>(*buf);
+
+ bool found = false;
+ int checkingIndex = 0;
+ for (checkingIndex = 0; checkingIndex < selfStreamParms->numSvcBuffers ; checkingIndex++) {
+ if (priv_handle->fd == selfStreamParms->svcBuffers[checkingIndex].fd.extFd[0] ) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) break;
+ selfStreamParms->svcBufStatus[checkingIndex] = ON_HAL;
+ selfStreamParms->numSvcBufsInHal++;
}
+
}
ALOGV("DEBUG(%s): stream(%d) processing SIGNAL_STREAM_DATA_COMING DONE",
__FUNCTION__,selfThread->m_index);
ALOGE("ERR(%s):jpegEnc.setColorFormat() fail", __FUNCTION__);
goto jpeg_encode_done;
}
- ALOGV("%s : color = %d\n", __FUNCTION__, &(rect->colorFormat));
if (jpegEnc.setJpegFormat(V4L2_PIX_FMT_JPEG_422)) {
ALOGE("ERR(%s):jpegEnc.setJpegFormat() fail", __FUNCTION__);
mExifInfo.enableThumb = false;
}
ALOGV("DEBUG(%s):calling jpegEnc.setInBuf() yuvSize(%d)", __FUNCTION__, *yuvSize);
- /*for (int i=0 ; i < 3 ; i++)
- ALOGV("DEBUG(%s):calling jpegEnc.setInBuf() virt.extP[%d]=%x extS[%d]=%d",
- __FUNCTION__, i, yuvBuf->fd.extFd[i], i, yuvBuf->size.extS[i]);*/
if (jpegEnc.setInBuf((int *)&(yuvBuf->fd.fd), (int *)yuvSize)) {
ALOGE("ERR(%s):jpegEnc.setInBuf() fail", __FUNCTION__);
goto jpeg_encode_done;
ALOGE("ERR(%s):jpegEnc.setOutBuf() fail", __FUNCTION__);
goto jpeg_encode_done;
}
- /*for (int i=0 ; i < 3 ; i++)
- ALOGV("DEBUG(%s): jpegBuf->virt.extP[%d]=%x jpegBuf->size.extS[%d]=%d",
- __FUNCTION__, i, jpegBuf->fd.extFd[i], i, jpegBuf->size.extS[i]);*/
memset(jpegBuf->virt.p,0,jpegBuf->size.extS[0] + jpegBuf->size.extS[1] + jpegBuf->size.extS[2]);
if (jpegEnc.updateConfig()) {
return NO_ERROR;
}
-status_t MetadataConverter::ToInternalShot(camera_metadata_t * request, camera2_ctl_metadata_NEW_t * dst)
+status_t MetadataConverter::ToInternalShot(camera_metadata_t * request, struct camera2_shot_ext * dst_ext)
{
uint32_t num_entry = 0;
uint32_t index = 0;
uint32_t i = 0;
camera_metadata_entry_t curr_entry;
+ struct camera2_shot * dst = NULL;
ALOGV("DEBUG(%s):", __FUNCTION__);
- if (request == NULL || dst == NULL)
+ if (request == NULL || dst_ext == NULL)
return BAD_VALUE;
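+ // converted fields land in the embedded camera2_shot; the surrounding
+ // _ext wrapper keeps the driver-side extra fields (request_scp etc.) intact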
+ dst = &(dst_ext->shot);
+
num_entry = (uint32_t)get_camera_metadata_data_count(request);
for (index = 0 ; index < num_entry ; index++) {
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
dst->ctl.lens.opticalStabilizationMode =
- (optical_stabilization_mode_NEW_t)curr_entry.data.u8[0];
+ (enum optical_stabilization_mode)curr_entry.data.u8[0];
break;
case ANDROID_FLASH_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.flash.flashMode = (flash_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.flash.flashMode = (enum flash_mode)curr_entry.data.u8[0];
break;
case ANDROID_FLASH_FIRING_POWER:
case ANDROID_HOT_PIXEL_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.hotpixel.mode = (hotpixel_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.hotpixel.mode = (enum processing_mode)curr_entry.data.u8[0];
break;
case ANDROID_DEMOSAIC_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.demosaic.mode = (demosaic_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.demosaic.mode = (enum processing_mode)curr_entry.data.u8[0];
break;
case ANDROID_NOISE_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.noise.mode = (noise_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.noise.mode = (enum processing_mode)curr_entry.data.u8[0];
break;
case ANDROID_NOISE_STRENGTH:
case ANDROID_SHADING_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.shading.mode = (shading_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.shading.mode = (enum processing_mode)curr_entry.data.u8[0];
break;
case ANDROID_GEOMETRIC_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.geometric.mode = (geometric_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.geometric.mode = (enum processing_mode)curr_entry.data.u8[0];
break;
case ANDROID_COLOR_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.color.mode = (colorcorrection_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.color.mode = (enum colorcorrection_mode)curr_entry.data.u8[0];
break;
case ANDROID_COLOR_TRANSFORM:
case ANDROID_TONEMAP_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.tonemap.mode = (tonemap_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.tonemap.mode = (enum tonemap_mode)curr_entry.data.u8[0];
break;
case ANDROID_TONEMAP_CURVE_RED:
case ANDROID_EDGE_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.edge.mode = (edge_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.edge.mode = (enum processing_mode)curr_entry.data.u8[0];
break;
case ANDROID_EDGE_STRENGTH:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.edge.strength = (edge_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.edge.strength = curr_entry.data.u8[0];
break;
case ANDROID_JPEG_GPS_PROCESSING_METHOD:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 32))
break;
- for (i=0 ; i<curr_entry.count ; i++)
- dst->ctl.jpeg.gpsProcessingMethod[i] = curr_entry.data.u8[i];
+ dst->ctl.jpeg.gpsProcessingMethod = curr_entry.data.u8[0];
break;
case ANDROID_JPEG_GPS_TIMESTAMP:
case ANDROID_STATS_FACE_DETECT_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.stats.faceDetectMode = (facedetect_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.stats.faceDetectMode = (enum facedetect_mode)curr_entry.data.u8[0];
break;
case ANDROID_STATS_HISTOGRAM_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.stats.histogramMode = (histogram_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.stats.histogramMode = (enum stats_mode)curr_entry.data.u8[0];
break;
case ANDROID_STATS_SHARPNESS_MAP_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.stats.sharpnessMapMode = (sharpnessmap_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.stats.sharpnessMapMode = (enum stats_mode)curr_entry.data.u8[0];
break;
case ANDROID_CONTROL_CAPTURE_INTENT:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.aa.captureIntent = (aa_captureintent_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.aa.captureIntent = (enum aa_capture_intent)curr_entry.data.u8[0];
break;
case ANDROID_CONTROL_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.aa.mode = (aa_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.aa.mode = (enum aa_mode)curr_entry.data.u8[0];
break;
case ANDROID_CONTROL_EFFECT_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.aa.effect_mode = (aa_effect_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.aa.effectMode = (enum aa_effect_mode)curr_entry.data.u8[0];
break;
case ANDROID_CONTROL_SCENE_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.aa.scene_mode = (aa_scene_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.aa.sceneMode = (enum aa_scene_mode)curr_entry.data.u8[0];
break;
case ANDROID_CONTROL_VIDEO_STABILIZATION_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.aa.videoStabilizationMode = (aa_video_stab_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.aa.videoStabilizationMode = curr_entry.data.u8[0];
break;
case ANDROID_CONTROL_AE_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.aa.aeMode= (aa_aemode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.aa.aeMode= (enum aa_aemode)curr_entry.data.u8[0];
break;
case ANDROID_CONTROL_AE_REGIONS:
case ANDROID_CONTROL_AE_EXP_COMPENSATION:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_INT32, 1))
break;
- dst->ctl.aa.aeExpCompensation= (aa_aemode_NEW_t)curr_entry.data.i32[0];
+ dst->ctl.aa.aeExpCompensation = curr_entry.data.i32[0];
break;
case ANDROID_CONTROL_AE_TARGET_FPS_RANGE:
case ANDROID_CONTROL_AE_ANTIBANDING_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.aa.aeAntibandingMode = (aa_ae_antibanding_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.aa.aeAntibandingMode = (enum aa_ae_antibanding_mode)curr_entry.data.u8[0];
break;
case ANDROID_CONTROL_AWB_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.aa.awbMode = (aa_awbmode_NEW_t)curr_entry.data.u8[0];
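+ // the driver-side aa_awbmode enum appears to be offset by one from the framework value, hence the +1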
+ dst->ctl.aa.awbMode = (enum aa_awbmode)(curr_entry.data.u8[0] + 1);
break;
case ANDROID_CONTROL_AWB_REGIONS:
case ANDROID_CONTROL_AF_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.aa.afMode = (aa_afmode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.aa.afMode = (enum aa_afmode)curr_entry.data.u8[0];
break;
case ANDROID_CONTROL_AF_REGIONS:
case ANDROID_REQUEST_METADATA_MODE:
if (NO_ERROR != CheckEntryTypeMismatch(&curr_entry, TYPE_BYTE, 1))
break;
- dst->ctl.request.metadataMode = (metadata_mode_NEW_t)curr_entry.data.u8[0];
+ dst->ctl.request.metadataMode = (enum metadata_mode)curr_entry.data.u8[0];
ALOGV("DEBUG(%s): ANDROID_REQUEST_METADATA_MODE (%d)", __FUNCTION__, (int)( dst->ctl.request.metadataMode));
break;
dst->ctl.request.outputStreams[i] = curr_entry.data.u8[i];
ALOGV("DEBUG(%s): OUTPUT_STREAM[%d] = %d ", __FUNCTION__, i, (int)(dst->ctl.request.outputStreams[i]));
}
- dst->ctl.request.numOutputStream = curr_entry.count;
+ dst->ctl.request.id = curr_entry.count; // temporary: stash the output stream count in 'id' until the shot struct carries it directly
break;
case ANDROID_REQUEST_FRAME_COUNT:
-status_t MetadataConverter::ToDynamicMetadata(camera2_ctl_metadata_NEW_t * metadata, camera_metadata_t * dst)
+status_t MetadataConverter::ToDynamicMetadata(struct camera2_shot_ext * metadata_ext, camera_metadata_t * dst)
{
status_t res;
+ struct camera2_shot * metadata = &(metadata_ext->shot);
+ uint8_t byteData;
+ uint32_t intData;
ALOGV("DEBUG(%s): TEMP version using original request METADATA", __FUNCTION__);
if (0 != add_camera_metadata_entry(dst, ANDROID_REQUEST_ID,
return NO_MEMORY;
- if (metadata->ctl.request.metadataMode == METADATA_MODE_NONE_NEW) {
+ if (metadata->ctl.request.metadataMode == METADATA_MODE_NONE) {
ALOGV("DEBUG(%s): METADATA_MODE_NONE", __FUNCTION__);
return NO_ERROR;
}
+++ /dev/null
-/*
- * Samsung Exynos5 SoC series Camera API 2.0 HAL
- *
- * Internal Metadata (controls/dynamic metadata and static metadata)
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- * Contact: Sungjoong Kang, <sj3.kang@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/* 2012.04.18 Version 0.1 Initial Release */
-/* 2012.04.23 Version 0.2 Added static metadata (draft) */
-/* 2012.05.14 Version 0.3 Bug fixes and data type modification */
-/* 2012.05.15 Version 0.4 Modified for Google's new camera_metadata.h */
-
-
-#ifndef CAMERA2_INTERNAL_METADATA_H_
-#define CAMERA2_INTERNAL_METADATA_H_
-
-//#include "camera_common.h"
-#include <stdint.h>
-#include <sys/cdefs.h>
-#include <sys/types.h>
-
-
-typedef struct rational_NEW {
- uint32_t num;
- uint32_t den;
-} rational_NEW_t;
-
-
-/*
- * controls/dynamic metadata
- */
-
-
-/* android.request */
-
-typedef enum metadata_mode_NEW
-{
- METADATA_MODE_NONE_NEW,
- METADATA_MODE_FULL_NEW
-} metadata_mode_NEW_t;
-
-typedef struct camera2_request_ctl_NEW {
- uint32_t id;
- metadata_mode_NEW_t metadataMode;
- uint8_t outputStreams[16];
- uint8_t numOutputStream;
- uint32_t frameCount;
-} camera2_request_ctl_NEW_t;
-
-typedef struct camera2_request_dm_NEW {
- uint32_t id;
- metadata_mode_NEW_t metadataMode;
- uint32_t frameCount;
-} camera2_request_dm_NEW_t;
-
-
-
-/* android.lens */
-
-typedef enum optical_stabilization_mode_NEW
-{
- OPTICAL_STABILIZATION_MODE_OFF_NEW,
- OPTICAL_STABILIZATION_MODE_ON_NEW
-} optical_stabilization_mode_NEW_t;
-
-typedef struct camera2_lens_ctl_NEW {
- float focusDistance;
- float aperture;
- float focalLength;
- float filterDensity;
- optical_stabilization_mode_NEW_t opticalStabilizationMode;
-} camera2_lens_ctl_NEW_t;
-
-typedef struct camera2_lens_dm_NEW {
- float focusDistance;
- float aperture;
- float focalLength;
- float filterDensity;
- optical_stabilization_mode_NEW_t opticalStabilizationMode;
- float focusRange[2];
-} camera2_lens_dm_NEW_t;
-
-
-
-/* android.sensor */
-
-typedef struct camera2_sensor_ctl_NEW {
- uint64_t exposureTime;
- uint64_t frameDuration;
- uint32_t sensitivity;
-} camera2_sensor_ctl_NEW_t;
-
-typedef struct camera2_sensor_dm_NEW {
- uint64_t exposureTime;
- uint64_t frameDuration;
- uint32_t sensitivity;
- uint64_t timeStamp;
- uint32_t frameCount;
-} camera2_sensor_dm_NEW_t;
-
-
-
-/* android.flash */
-
-typedef enum flash_mode_NEW
-{
- FLASH_MODE_OFF_NEW,
- FLASH_MODE_SINGLE_NEW,
- FLASH_MODE_AUTO_SINGLE_NEW,
- FLASH_MODE_TORCH_NEW
-} flash_mode_NEW_t;
-
-typedef struct camera2_flash_ctl_NEW {
- flash_mode_NEW_t flashMode;
- uint8_t firingPower;
- uint64_t firingTime;
-} camera2_flash_ctl_NEW_t;
-
-typedef struct camera2_flash_dm_NEW {
- flash_mode_NEW_t flashMode;
- uint8_t firingPower;
- uint64_t firingTime;
-} camera2_flash_dm_NEW_t;
-
-
-
-/* android.flash */
-
-typedef enum hotpixel_mode_NEW
-{
- HOTPIXEL_MODE_OFF_NEW,
- HOTPIXEL_MODE_FAST_NEW,
- HOTPIXEL_MODE_HIGH_QUALITY_NEW
-} hotpixel_mode_NEW_t;
-
-
-typedef struct camera2_hotpixel_ctl_NEW {
- hotpixel_mode_NEW_t mode;
-} camera2_hotpixel_ctl_NEW_t;
-
-typedef struct camera2_hotpixel_dm_NEW {
- hotpixel_mode_NEW_t mode;
-} camera2_hotpixel_dm_NEW_t;
-
-
-
-/* android.demosaic */
-
-typedef enum demosaic_mode_NEW
-{
- DEMOSAIC_MODE_FAST_NEW = 1,
- DEMOSAIC_MODE_HIGH_QUALITY_NEW
-} demosaic_mode_NEW_t;
-
-typedef struct camera2_demosaic_ctl_NEW {
- demosaic_mode_NEW_t mode;
-} camera2_demosaic_ctl_NEW_t;
-
-typedef struct camera2_demosaic_dm_NEW {
- demosaic_mode_NEW_t mode;
-} camera2_demosaic_dm_NEW_t;
-
-
-
-/* android.noiseReduction */
-
-typedef enum noise_mode_NEW
-{
- NOISEREDUCTION_MODE_OFF_NEW,
- NOISEREDUCTION_MODE_FAST_NEW,
- NOISEREDUCTION_MODE_HIGH_QUALITY_NEW
-} noise_mode_NEW_t;
-
-typedef struct camera2_noisereduction_ctl_NEW {
- noise_mode_NEW_t mode;
- uint8_t strength;
-} camera2_noisereduction_ctl_NEW_t;
-
-typedef struct camera2_noisereduction_dm_NEW {
- noise_mode_NEW_t mode;
- uint8_t strength;
-} camera2_noisereduction_dm_NEW_t;
-
-
-
-/* android.shading */
-
-typedef enum shading_mode_NEW
-{
- SHADING_MODE_OFF_NEW,
- SHADING_MODE_FAST_NEW,
- SHADING_MODE_HIGH_QUALITY_NEW
-} shading_mode_NEW_t;
-
-typedef struct camera2_shading_ctl_NEW {
- shading_mode_NEW_t mode;
-} camera2_shading_ctl_NEW_t;
-
-typedef struct camera2_shading_dm_NEW {
- shading_mode_NEW_t mode;
-} camera2_shading_dm_NEW_t;
-
-
-
-/* android.geometric */
-
-typedef enum geometric_mode_NEW
-{
- GEOMETRIC_MODE_OFF_NEW,
- GEOMETRIC_MODE_FAST_NEW,
- GEOMETRIC_MODE_HIGH_QUALITY_NEW
-} geometric_mode_NEW_t;
-
-typedef struct camera2_geometric_ctl_NEW {
- geometric_mode_NEW_t mode;
-} camera2_geometric_ctl_NEW_t;
-
-typedef struct camera2_geometric_dm_NEW {
- geometric_mode_NEW_t mode;
-} camera2_geometric_dm_NEW_t;
-
-
-
-/* android.colorCorrection */
-
-typedef enum colorcorrection_mode_NEW
-{
- COLORCORRECTION_MODE_FAST_NEW = 1,
- COLORCORRECTION_MODE_HIGH_QUALITY_NEW,
- COLORCORRECTION_MODE_TRANSFORM_MATRIX_NEW
-} colorcorrection_mode_NEW_t;
-
-
-typedef struct camera2_colorcorrection_ctl_NEW {
- colorcorrection_mode_NEW_t mode;
- float transform[9];
-} camera2_colorcorrection_ctl_NEW_t;
-
-typedef struct camera2_colorcorrection_dm_NEW {
- colorcorrection_mode_NEW_t mode;
- float transform[9];
-} camera2_colorcorrection_dm_NEW_t;
-
-
-
-/* android.tonemap */
-
-typedef enum tonemap_mode_NEW
-{
- TONEMAP_MODE_FAST_NEW = 1,
- TONEMAP_MODE_HIGH_QUALITY_NEW,
- TONEMAP_MODE_CONTRAST_CURVE_NEW,
-} tonemap_mode_NEW_t;
-
-typedef struct camera2_tonemap_ctl_NEW {
- tonemap_mode_NEW_t mode;
- float curveRed[32]; // assuming maxCurvePoints = 32
- float curveGreen[32];
- float curveBlue[32];
-} camera2_tonemap_ctl_NEW_t;
-
-typedef struct camera2_tonemap_dm_NEW {
- tonemap_mode_NEW_t mode;
- float curveRed[32]; // assuming maxCurvePoints = 32
- float curveGreen[32];
- float curveBlue[32];
-} camera2_tonemap_dm_NEW_t;
-
-
-
-/* android.edge */
-
-typedef enum edge_mode_NEW
-{
- EDGE_MODE_OFF_NEW,
- EDGE_MODE_FAST_NEW,
- EDGE_MODE_HIGH_QUALITY_NEW
-} edge_mode_NEW_t;
-
-typedef struct camera2_edge_ctl_NEW {
- edge_mode_NEW_t mode;
- uint8_t strength;
-} camera2_edge_ctl_NEW_t;
-
-typedef struct camera2_edge_dm_NEW {
- edge_mode_NEW_t mode;
- uint8_t strength;
-} camera2_edge_dm_NEW_t;
-
-
-
-/* android.scaler */
-
-typedef struct camera2_scaler_ctl_NEW {
- uint32_t cropRegion[3];
- //uint32_t rotation;
-} camera2_scaler_ctl_NEW_t;
-
-typedef struct camera2_scaler_dm_NEW {
- //uint32_t size[2];
- //uint8_t format;
- uint32_t cropRegion[3];
- //uint32_t rotation;
-} camera2_scaler_dm_NEW_t;
-
-
-
-/* android.jpeg */
-
-typedef struct camera2_jpeg_ctl_NEW {
- uint32_t quality;
- uint32_t thumbnailSize[2];
- uint32_t thumbnailQuality;
- double gpsCoordinates[2]; // needs check
- uint8_t gpsProcessingMethod[32];
- uint64_t gpsTimestamp;
- uint32_t orientation;
-} camera2_jpeg_ctl_NEW_t;
-
-typedef struct camera2_jpeg_dm_NEW {
- uint8_t quality;
- uint32_t thumbnailSize[2];
- uint8_t thumbnailQuality;
- double gpsCoordinates[3];
- uint8_t gpsProcessingMethod;
- uint64_t gpsTimestamp;
- uint32_t orientation;
-} camera2_jpeg_dm_NEW_t;
-
-
-
-/* android.statistics */
-
-typedef enum facedetect_mode_NEW
-{
- FACEDETECT_MODE_OFF_NEW,
- FACEDETECT_MODE_SIMPLE_NEW,
- FACEDETECT_MODE_FULL_NEW
-} facedetect_mode_NEW_t;
-
-typedef enum histogram_mode_NEW
-{
- HISTOGRAM_MODE_OFF_NEW,
- HISTOGRAM_MODE_ON_NEW
-} histogram_mode_NEW_t;
-
-typedef enum sharpnessmap_mode_NEW
-{
- SHARPNESSMAP_MODE_OFF_NEW,
- SHARPNESSMAP_MODE_ON_NEW
-} sharpnessmap_mode_NEW_t;
-
-typedef struct camera2_stats_ctl_NEW {
- facedetect_mode_NEW_t faceDetectMode;
- histogram_mode_NEW_t histogramMode;
- sharpnessmap_mode_NEW_t sharpnessMapMode;
-} camera2_stats_ctl_NEW_t;
-
-/* REMARKS : FD results are not included */
-typedef struct camera2_stats_dm_NEW {
- facedetect_mode_NEW_t faceDetectMode;
- // faceRetangles
- // faceScores
- // faceLandmarks
- // faceIds
- histogram_mode_NEW_t histogramMode;
- // histogram
- sharpnessmap_mode_NEW_t sharpnessMapMode;
- // sharpnessMap
-} camera2_stats_dm_NEW_t;
-
-
-
-/* android.control */
-
-typedef enum aa_captureintent_NEW
-{
- AA_CAPTURE_INTENT_CUSTOM_NEW,
- AA_CAPTURE_INTENT_PREVIEW_NEW,
- AA_CAPTURE_INTENT_STILL_CAPTURE_NEW,
- AA_CAPTURE_INTENT_VIDEO_RECORD_NEW,
- AA_CAPTURE_INTENT_VIDEO_SNAPSHOT_NEW,
- AA_CAPTURE_INTENT_ZERO_SHUTTER_LAG_NEW
-} aa_captureintent_NEW_t;
-
-typedef enum aa_mode_NEW
-{
- AA_MODE_OFF_NEW,
- AA_MODE_AUTO_NEW,
- AA_MODE_USE_SCENE_MODE_NEW
-} aa_mode_NEW_t;
-
-typedef enum aa_scene_mode_NEW
-{
- AA_SCENE_MODE_FACE_PRIORITY_NEW,
- AA_SCENE_MODE_ACTION_NEW,
- AA_SCENE_MODE_PORTRAIT_NEW,
- AA_SCENE_MODE_LANDSCAPE_NEW,
- AA_SCENE_MODE_NIGHT_NEW,
- AA_SCENE_MODE_NIGHT_PORTRAIT_NEW,
- AA_SCENE_MODE_THEATRE_NEW,
- AA_SCENE_MODE_BEACH_NEW,
- AA_SCENE_MODE_SNOW_NEW,
- AA_SCENE_MODE_SUNSET_NEW,
- AA_SCENE_MODE_STEADYPHOTO_NEW,
- AA_SCENE_MODE_FIREWORKS_NEW,
- AA_SCENE_MODE_SPORTS_NEW,
- AA_SCENE_MODE_PARTY_NEW,
- AA_SCENE_MODE_CANDLELIGHT_NEW,
- AA_SCENE_MODE_BARCODE_NEW
-} aa_scene_mode_NEW_t;
-
-typedef enum aa_video_stab_mode_NEW
-{
- AA_VIDEO_STABILIZATION_OFF_NEW,
- AA_VIDEO_STABILIZATION_ON_NEW
-} aa_video_stab_mode_NEW_t;
-
-typedef enum aa_effect_mode_NEW
-{
- AA_EFFECT_MODE_OFF_NEW,
- AA_EFFECT_MODE_MONO_NEW,
- AA_EFFECT_MODE_NEGATIVE_NEW,
- AA_EFFECT_MODE_SOLARIZE_NEW,
- AA_EFFECT_MODE_SEPIA_NEW,
- AA_EFFECT_MODE_POSTERIZE_NEW,
- AA_EFFECT_MODE_WHITEBOARD_NEW,
- AA_EFFECT_MODE_BLACKBOARD_NEW,
- AA_EFFECT_MODE_AQUA
-} aa_effect_mode_NEW_t;
-
-typedef enum aa_aemode_NEW
-{
- AA_AEMODE_OFF_NEW,
- AA_AEMODE_ON_NEW,
- AA_AEMODE_ON_AUTO_FLASH_NEW,
- AA_AEMODE_ON_ALWAYS_FLASH_NEW,
- AA_AEMODE_ON_AUTO_FLASH_REDEYE_NEW
-} aa_aemode_NEW_t;
-
-typedef enum aa_ae_antibanding_mode_NEW
-{
- AA_AE_ANTIBANDING_OFF_NEW,
- AA_AE_ANTIBANDING_50HZ_NEW,
- AA_AE_ANTIBANDING_60HZ_NEW,
- AA_AE_ANTIBANDING_AUTO_NEW
-} aa_ae_antibanding_mode_NEW_t;
-
-typedef enum aa_awbmode_NEW
-{
- AA_AWBMODE_OFF_NEW,
- AA_AWBMODE_WB_AUTO_NEW,
- AA_AWBMODE_WB_INCANDESCENT_NEW,
- AA_AWBMODE_WB_FLUORESCENT_NEW,
- AA_AWBMODE_WB_WARM_FLUORESCENT_NEW,
- AA_AWBMODE_WB_DAYLIGHT_NEW,
- AA_AWBMODE_WB_CLOUDY_DAYLIGHT_NEW,
- AA_AWBMODE_WB_TWILIGHT_NEW,
- AA_AWBMODE_WB_SHADE_NEW
-} aa_awbmode_NEW_t;
-
-typedef enum aa_afmode_NEW
-{
- AA_AFMODE_OFF_NEW,
- AA_AFMODE_FOCUS_MODE_AUTO_NEW,
- AA_AFMODE_FOCUS_MODE_MACRO_NEW,
- AA_AFMODE_FOCUS_MODE_CONTINUOUS_VIDEO_NEW,
- AA_AFMODE_FOCUS_MODE_CONTINUOUS_PICTURE_NEW
-} aa_afmode_NEW_t;
-
-typedef enum aa_afstate_NEW
-{
- AA_AFSTATE_INACTIVE_NEW,
- AA_AFSTATE_PASSIVE_SCAN_NEW,
- AA_AFSTATE_ACTIVE_SCAN_NEW,
- AA_AFSTATE_AF_ACQUIRED_FOCUS_NEW,
- AA_AFSTATE_AF_FAILED_FOCUS_NEW
-} aa_afstate_NEW_t;
-
-typedef struct camera2_aa_ctl_NEW {
- aa_captureintent_NEW_t captureIntent;
- aa_mode_NEW_t mode;
- aa_effect_mode_NEW_t effect_mode;
- aa_scene_mode_NEW_t scene_mode;
- aa_video_stab_mode_NEW_t videoStabilizationMode;
- aa_aemode_NEW_t aeMode;
- uint32_t aeRegions[5]; // 5 per region(x1,y1,x2,y2,weight). currently assuming 1 region.
- int32_t aeExpCompensation;
- uint32_t aeTargetFpsRange[2];
- aa_ae_antibanding_mode_NEW_t aeAntibandingMode;
- uint8_t aeState; // NEEDS_VERIFY after official release
- aa_awbmode_NEW_t awbMode;
- uint32_t awbRegions[5]; // 5 per region(x1,y1,x2,y2,weight). currently assuming 1 region.
- uint8_t awbState; // NEEDS_VERIFY after official release
- aa_afmode_NEW_t afMode;
- uint32_t afRegions[5]; // 5 per region(x1,y1,x2,y2,weight). currently assuming 1 region.
-
-} camera2_aa_ctl_NEW_t;
-
-typedef struct camera2_aa_dm_NEW {
- aa_captureintent_NEW_t captureIntent;
- aa_mode_NEW_t mode;
- aa_effect_mode_NEW_t effect_mode;
- aa_scene_mode_NEW_t scene_mode;
- aa_video_stab_mode_NEW_t videoStabilizationMode;
- aa_aemode_NEW_t aeMode; // needs check
- uint32_t aeRegions[5]; // 5 per region(x1,y1,x2,y2,weight). currently assuming 1 region.
- int32_t aeExpCompensation; // needs check
- uint8_t aeState; // NEEDS_VERIFY after official release
- aa_awbmode_NEW_t awbMode;
- uint32_t awbRegions[5]; // 5 per region(x1,y1,x2,y2,weight). currently assuming 1 region.
- uint8_t awbState; // NEEDS_VERIFY after official release
- aa_afmode_NEW_t afMode;
- uint32_t afRegions[5]; // 5 per region(x1,y1,x2,y2,weight). currently assuming 1 region.
- aa_afstate_NEW_t afState;
-} camera2_aa_dm_NEW_t;
-
-
-
-
-// sizeof(camera2_ctl) = ?
-typedef struct camera2_ctl_NEW {
- camera2_request_ctl_NEW_t request;
- camera2_lens_ctl_NEW_t lens;
- camera2_sensor_ctl_NEW_t sensor;
- camera2_flash_ctl_NEW_t flash;
- camera2_hotpixel_ctl_NEW_t hotpixel;
- camera2_demosaic_ctl_NEW_t demosaic;
- camera2_noisereduction_ctl_NEW_t noise;
- camera2_shading_ctl_NEW_t shading;
- camera2_geometric_ctl_NEW_t geometric;
- camera2_colorcorrection_ctl_NEW_t color;
- camera2_tonemap_ctl_NEW_t tonemap;
- camera2_edge_ctl_NEW_t edge;
- camera2_scaler_ctl_NEW_t scaler;
- camera2_jpeg_ctl_NEW_t jpeg;
- camera2_stats_ctl_NEW_t stats;
- camera2_aa_ctl_NEW_t aa;
-} camera2_ctl_NEW_t;
-
-// sizeof(camera2_dm) = ?
-typedef struct camera2_dm_NEW {
- camera2_request_dm_NEW_t request;
- camera2_lens_dm_NEW_t lens;
- camera2_sensor_dm_NEW_t sensor;
- camera2_flash_dm_NEW_t flash;
- camera2_hotpixel_dm_NEW_t hotpixel;
- camera2_demosaic_dm_NEW_t demosaic;
- camera2_noisereduction_dm_NEW_t noise;
- camera2_shading_dm_NEW_t shading;
- camera2_geometric_dm_NEW_t geometric;
- camera2_colorcorrection_dm_NEW_t color;
- camera2_tonemap_dm_NEW_t tonemap;
- camera2_edge_dm_NEW_t edge;
- camera2_scaler_dm_NEW_t scaler;
- camera2_jpeg_dm_NEW_t jpeg;
- camera2_stats_dm_NEW_t stats;
- camera2_aa_dm_NEW_t aa;
-} camera2_dm_NEW_t;
-
-
-
-
-typedef struct camera2_ctl_metadata_NEW {
- camera2_ctl_NEW_t ctl;
- camera2_dm_NEW_t dm;
-} camera2_ctl_metadata_NEW_t;
-
-
-
-
-/*
- * static metadata
- */
-
-
-/* android.lens */
-
-typedef enum lens_facing_NEW
-{
- LENS_FACING_FRONT_NEW,
- LENS_FACING_BACK_NEW
-} lens_facing_NEW_t;
-
-typedef struct camera2_lens_sm_NEW {
- float minimumFocusDistance;
- float availableFocalLength[2];
- float availableApertures; // assuming 1 aperture
- float availableFilterDensities; // assuming 1 ND filter value
- uint8_t availableOpticalStabilization; // assuming 1
- float shadingMap[3][40][30];
- float geometricCorrectionMap[2][3][40][30];
- lens_facing_NEW_t facing;
- float position[2];
-} camera2_lens_sm_NEW_t;
-
-
-
-/* android.sensor */
-
-typedef enum sensor_colorfilterarrangement_NEW
-{
- SENSOR_COLORFILTERARRANGEMENT_RGGB_NEW,
- SENSOR_COLORFILTERARRANGEMENT_GRBG_NEW,
- SENSOR_COLORFILTERARRANGEMENT_GBRG_NEW,
- SENSOR_COLORFILTERARRANGEMENT_BGGR_NEW,
- SENSOR_COLORFILTERARRANGEMENT_RGB_NEW
-} sensor_colorfilterarrangement_NEW_t;
-
-typedef enum sensor_ref_illuminant_NEW
-{
- SENSOR_ILLUMINANT_DAYLIGHT_NEW = 1,
- SENSOR_ILLUMINANT_FLUORESCENT_NEW = 2,
- SENSOR_ILLUMINANT_TUNGSTEN_NEW = 3,
- SENSOR_ILLUMINANT_FLASH_NEW = 4,
- SENSOR_ILLUMINANT_FINE_WEATHER_NEW = 9,
- SENSOR_ILLUMINANT_CLOUDY_WEATHER_NEW = 10,
- SENSOR_ILLUMINANT_SHADE_NEW = 11,
- SENSOR_ILLUMINANT_DAYLIGHT_FLUORESCENT_NEW = 12,
- SENSOR_ILLUMINANT_DAY_WHITE_FLUORESCENT_NEW = 13,
- SENSOR_ILLUMINANT_COOL_WHITE_FLUORESCENT_NEW = 14,
- SENSOR_ILLUMINANT_WHITE_FLUORESCENT_NEW = 15,
- SENSOR_ILLUMINANT_STANDARD_A_NEW = 17,
- SENSOR_ILLUMINANT_STANDARD_B_NEW = 18,
- SENSOR_ILLUMINANT_STANDARD_C_NEW = 19,
- SENSOR_ILLUMINANT_D55_NEW = 20,
- SENSOR_ILLUMINANT_D65_NEW = 21,
- SENSOR_ILLUMINANT_D75_NEW = 22,
- SENSOR_ILLUMINANT_D50_NEW = 23,
- SENSOR_ILLUMINANT_ISO_STUDIO_TUNGSTEN_NEW = 24
-} sensor_ref_illuminant_NEW_t;
-
-typedef struct camera2_sensor_sm_NEW {
- uint32_t exposureTimeRange[2];
- uint32_t maxFrameDuration;
- uint32_t sensitivityRange[2];
- sensor_colorfilterarrangement_NEW_t colorFilterArrangement;
- uint32_t pixelArraySize[2];
- uint32_t activeArraySize[4];
- uint32_t whiteLevel;
- uint32_t blackLevelPattern[4];
- rational_NEW_t colorTransform1[9];
- rational_NEW_t colorTransform2[9];
- sensor_ref_illuminant_NEW_t referenceIlluminant1;
- sensor_ref_illuminant_NEW_t referenceIlluminant2;
- rational_NEW_t forwardMatrix1[9];
- rational_NEW_t forwardMatrix2[9];
- rational_NEW_t calibrationTransform1[9];
- rational_NEW_t calibrationTransform2[9];
- rational_NEW_t baseGainFactor;
- uint32_t maxAnalogSensitivity;
- float noiseModelCoefficients[2];
- uint32_t orientation;
-} camera2_sensor_sm_NEW_t;
-
-
-
-/* android.flash */
-
-typedef struct camera2_flash_sm_NEW {
- uint8_t available;
- uint64_t chargeDuration;
-} camera2_flash_sm_NEW_t;
-
-
-
-/* android.colorCorrection */
-
-typedef struct camera2_colorcorrection_sm_NEW {
- colorcorrection_mode_NEW_t availableModes[10]; // assuming 10 supported modes
-} camera2_colorcorrection_sm_NEW_t;
-
-
-
-/* android.tonemap */
-
-typedef struct camera2_tonemap_sm_NEW {
- uint32_t maxCurvePoints;
-} camera2_tonemap_sm_NEW_t;
-
-
-
-/* android.scaler */
-
-typedef enum scaler_availableformats_NEW {
- SCALER_FORMAT_BAYER_RAW_NEW,
- SCALER_FORMAT_YV12_NEW,
- SCALER_FORMAT_NV21_NEW,
- SCALER_FORMAT_JPEG_NEW,
- SCALER_FORMAT_UNKNOWN_NEW
-} scaler_availableformats_NEW_t;
-
-typedef struct camera2_scaler_sm_NEW {
- scaler_availableformats_NEW_t availableFormats[4]; // assuming
- // # of availableFormats = 4
- uint32_t availableSizesPerFormat[4];
- uint32_t availableSizes[4][8][2]; // assuning availableSizesPerFormat=8
- uint64_t availableMinFrameDurations[4][8];
- float maxDigitalZoom;
-} camera2_scaler_sm_NEW_t;
-
-
-
-/* android.jpeg */
-
-typedef struct camera2_jpeg_sm_NEW {
- uint32_t availableThumbnailSizes[2][8]; // assuming supported size=8
-} camera2_jpeg_sm_NEW_t;
-
-
-
-/* android.statistics */
-
-typedef struct camera2_statistics_sm_NEW {
- uint8_t availableFaceDetectModes[3]; // assuming supported modes = 3;
- uint32_t maxFaceCount;
- uint32_t histogramBucketCount;
- uint32_t maxHistogramCount;
- uint32_t sharpnessMapSize[2];
- uint32_t maxSharpnessMapValue;
-} camera2_statistics_sm_NEW_t;
-
-
-
-/* android.control */
-
-typedef struct camera2_aa_sm_NEW {
- uint8_t availableModes[10]; // assuming # of available scene modes = 10
- uint32_t maxRegions;
- uint8_t aeAvailableModes[8]; // assuming # of available ae modes = 8
- rational_NEW_t aeCompensationStep;
- int32_t aeCompensationRange[2];
- uint32_t aeAvailableTargetFpsRanges[2][8];
- uint8_t aeAvailableAntibandingModes[4];
- uint8_t awbAvailableModes[10]; // assuming # of awbAvailableModes = 10
- uint8_t afAvailableModes[4]; // assuming # of afAvailableModes = 4
-} camera2_aa_sm_NEW_t;
-
-
-
-
-
-typedef struct camera2_static_metadata_NEW {
- camera2_lens_sm_NEW_t lens;
- camera2_sensor_sm_NEW_t sensor;
- camera2_flash_sm_NEW_t flash;
- camera2_colorcorrection_sm_NEW_t color;
- camera2_tonemap_sm_NEW_t tonemap;
- camera2_scaler_sm_NEW_t scaler;
- camera2_jpeg_sm_NEW_t jpeg;
- camera2_statistics_sm_NEW_t statistics;
- camera2_aa_sm_NEW_t aa;
-} camera2_static_metadata_NEW_t;
-
-
-#endif
-