struct pcf50633_adc *adc = __to_adc(pcf);
int head;
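+ /* note: all callers now hold adc->queue_mutex */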
- mutex_lock(&adc->queue_mutex);
-
head = adc->queue_head;
- if (!adc->queue[head]) {
- mutex_unlock(&adc->queue_mutex);
+ if (!adc->queue[head])
return;
- }
- mutex_unlock(&adc->queue_mutex);
adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg);
}
if (adc->queue[tail]) {
mutex_unlock(&adc->queue_mutex);
+ dev_err(pcf->dev, "ADC queue is full, dropping request\n");
return -EBUSY;
}
adc->queue[tail] = req;
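+ /* head == tail here means the queue was empty (the full case
+  * returned -EBUSY above), so no conversion is in flight yet */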
+ if (head == tail)
+ trigger_next_adc_job_if_any(pcf);
adc->queue_tail = (tail + 1) & (PCF50633_MAX_ADC_FIFO_DEPTH - 1);
mutex_unlock(&adc->queue_mutex);
- trigger_next_adc_job_if_any(pcf);
-
return 0;
}
int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg)
{
struct pcf50633_adc_request *req;
+ int err;
/* req is freed when the result is ready, in interrupt handler */
req = kzalloc(sizeof(*req), GFP_KERNEL);
req->callback_param = req;
init_completion(&req->completion);
- adc_enqueue_request(pcf, req);
+ err = adc_enqueue_request(pcf, req);
+ if (err) {
+ /* never queued, so the interrupt handler won't free it */
+ kfree(req);
+ return err;
+ }
+
wait_for_completion(&req->completion);
+ /* FIXME: by this point req may already have been freed by the interrupt handler */
return req->result;
}
EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read);
req->callback = callback;
req->callback_param = callback_param;
- adc_enqueue_request(pcf, req);
-
- return 0;
+ return adc_enqueue_request(pcf, req);
}
EXPORT_SYMBOL_GPL(pcf50633_adc_async_read);
struct pcf50633_adc *adc = data;
struct pcf50633 *pcf = adc->pcf;
struct pcf50633_adc_request *req;
- int head;
+ int head, res;
mutex_lock(&adc->queue_mutex);
head = adc->queue_head;
adc->queue_head = (head + 1) &
(PCF50633_MAX_ADC_FIFO_DEPTH - 1);
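+ /* read the result and start the next job before dropping the
+  * lock, so a concurrent enqueue cannot reprogram the ADC */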
+ res = adc_result(pcf);
+ trigger_next_adc_job_if_any(pcf);
+
mutex_unlock(&adc->queue_mutex);
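+ /* invoke the callback without the lock held: it might enqueue another request */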
- req->callback(pcf, req->callback_param, adc_result(pcf));
+ req->callback(pcf, req->callback_param, res);
kfree(req);
-
- trigger_next_adc_job_if_any(pcf);
}
static int __devinit pcf50633_adc_probe(struct platform_device *pdev)