staging: ti dspbridge: Rename words with camel case
drivers/staging/tidspbridge/core/chnl_sm.c
/*
 * chnl_sm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Implements upper edge functions for Bridge driver channel module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 * The lower edge functions must be implemented by the Bridge driver
 * writer, and are declared in chnl_sm.h.
 *
 * Care is taken in this code to prevent simultaneous access to channel
 * queues from
 * 1. Threads.
 * 2. io_dpc(), scheduled from the io_isr() as an event.
 *
 * This is done primarily by:
 * - Semaphores.
 * - state flags in the channel object; and
 * - ensuring the IO_Dispatch() routine, which is called from both
 *   CHNL_AddIOReq() and the DPC (if implemented), is not re-entered.
 *
 * Channel Invariant:
 * There is an important invariant condition which must be maintained per
 * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
 * which may cause timeouts and/or failure of function sync_wait_on_event.
 * This invariant condition is:
 *
 * LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is reset
 * and
 * !LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is set.
 */
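
/*
 * A minimal restatement of the invariant in code form, mirroring the
 * update performed near the bottom of bridge_chnl_get_ioc() below.
 * This is an illustrative sketch only (pchnl names a channel object as
 * in the functions below), not an additional API.
 */
#if 0	/* illustrative only, never built */
	if (!LST_IS_EMPTY(pchnl->pio_completions))
		sync_set_event(pchnl->sync_event);	/* IOCs queued */
	else
		sync_reset_event(pchnl->sync_event);	/* queue drained */
#endif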

/* ----------------------------------- OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>
#include <dspbridge/sync.h>

/* ----------------------------------- Bridge Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>
#include "_tiomap.h"

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>

/* ----------------------------------- Others */
#include <dspbridge/io_sm.h>

/* ----------------------------------- Defines for This Module */
#define USERMODE_ADDR	PAGE_OFFSET

#define MAILBOX_IRQ	INT_MAIL_MPU_IRQ

/* ----------------------------------- Function Prototypes */
static struct lst_list *create_chirp_list(u32 chirps);

static void free_chirp_list(struct lst_list *lst);

static struct chnl_irp *make_new_chirp(void);

static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
			       OUT u32 *chnl);

/*
 * ======== bridge_chnl_add_io_req ========
 * Enqueue an I/O request for data transfer on a channel to the DSP.
 * The direction (mode) is specified in the channel object. Note the DSP
 * address is specified for channels opened in direct I/O mode.
 */
int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
			   u32 byte_size, u32 buf_size,
			   OPTIONAL u32 dw_dsp_addr, u32 dw_arg)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;
	u8 dw_state;
	bool is_eos;
	struct chnl_mgr *chnl_mgr_obj = pchnl->chnl_mgr_obj;
	u8 *host_sys_buf = NULL;
	bool sched_dpc = false;
	u16 mb_val = 0;

	is_eos = (byte_size == 0);

	/* Validate args */
	if (!host_buf || !pchnl) {
		status = -EFAULT;
	} else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) {
		status = -EPERM;
	} else {
		/*
		 * Check the channel state: only queue chirp if channel state
		 * allows it.
		 */
		dw_state = pchnl->dw_state;
		if (dw_state != CHNL_STATEREADY) {
			if (dw_state & CHNL_STATECANCEL)
				status = -ECANCELED;
			else if ((dw_state & CHNL_STATEEOS) &&
				 CHNL_IS_OUTPUT(pchnl->chnl_mode))
				status = -EPIPE;
			else
				/* No other possible states left */
				DBC_ASSERT(0);
		}
	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (DSP_FAILED(status))
		goto func_end;

	if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
		if (!(host_buf < (void *)USERMODE_ADDR)) {
			host_sys_buf = host_buf;
			goto func_cont;
		}
		/* if addr in user mode, then copy to kernel space */
		host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
		if (host_sys_buf == NULL) {
			status = -ENOMEM;
			goto func_end;
		}
		if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = copy_from_user(host_sys_buf, host_buf,
						buf_size);
			if (status) {
				kfree(host_sys_buf);
				host_sys_buf = NULL;
				status = -EFAULT;
				goto func_end;
			}
		}
	}
func_cont:
	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
	 * If DPC is scheduled in process context (iosm_schedule) and any
	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (pchnl->chnl_type == CHNL_PCPY) {
		/* This is a processor-copy channel. */
		if (DSP_SUCCEEDED(status) && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			/* Check buffer size on output channels for fit. */
			if (byte_size >
			    io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
				status = -EINVAL;

		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Get a free chirp: */
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
		if (chnl_packet_obj == NULL)
			status = -EIO;

	}
	if (DSP_SUCCEEDED(status)) {
		/* Enqueue the chirp on the chnl's IORequest queue: */
		chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
		    host_buf;
		if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
			chnl_packet_obj->host_sys_buf = host_sys_buf;

		/*
		 * Note: for DMA channels dw_dsp_addr contains the DSP address
		 * of the SM buffer.
		 */
		DBC_ASSERT(chnl_mgr_obj->word_size != 0);
		/* DSP address */
		chnl_packet_obj->dsp_tx_addr =
		    dw_dsp_addr / chnl_mgr_obj->word_size;
		chnl_packet_obj->byte_size = byte_size;
		chnl_packet_obj->buf_size = buf_size;
		/* Only valid for output channel */
		chnl_packet_obj->dw_arg = dw_arg;
		chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
					   CHNL_IOCSTATCOMPLETE);
		lst_put_tail(pchnl->pio_requests,
			     (struct list_head *)chnl_packet_obj);
		pchnl->cio_reqs++;
		DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
		/*
		 * If end of stream, update the channel state to prevent
		 * more IOR's.
		 */
		if (is_eos)
			pchnl->dw_state |= CHNL_STATEEOS;

		/* Legacy DSM Processor-Copy */
		DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
		/* Request IO from the DSP */
		io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl,
				(CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
				 IO_OUTPUT), &mb_val);
		sched_dpc = true;

	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	if (mb_val != 0)
		io_intr_dsp2(chnl_mgr_obj->hio_mgr, mb_val);

	/* Schedule a DPC, to do the actual data transfer */
	if (sched_dpc)
		iosm_schedule(chnl_mgr_obj->hio_mgr);

func_end:
	return status;
}
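
/*
 * Hypothetical caller sketch: queue one buffer on an open proc-copy
 * channel. A zero byte_size marks end-of-stream, dw_arg is handed back
 * unchanged in the matching IOC, and dw_dsp_addr matters only for
 * channels opened in direct I/O mode (so 0 here). The function and
 * argument names are assumptions for illustration.
 */
#if 0	/* illustrative only, never built */
static int example_queue_buffer(struct chnl_object *chnl, void *buf,
				u32 bytes)
{
	return bridge_chnl_add_io_req(chnl, buf, bytes, bytes, 0, 0);
}
#endif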

/*
 * ======== bridge_chnl_cancel_io ========
 * Return all I/O requests to the client which have not yet been
 * transferred. The channel's I/O completion object is
 * signalled, and all the I/O requests are queued as IOC's, with the
 * status field set to CHNL_IOCSTATCANCEL.
 * This call is typically used in abort situations, and is a prelude to
 * chnl_close();
 */
int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	u32 chnl_id = -1;
	s8 chnl_mode;
	struct chnl_irp *chnl_packet_obj;
	struct chnl_mgr *chnl_mgr_obj = NULL;

	/* Check args: */
	if (pchnl && pchnl->chnl_mgr_obj) {
		chnl_id = pchnl->chnl_id;
		chnl_mode = pchnl->chnl_mode;
		chnl_mgr_obj = pchnl->chnl_mgr_obj;
	} else {
		status = -EFAULT;
	}
	if (DSP_FAILED(status))
		goto func_end;

	/* Mark this channel as cancelled, to prevent further IORequests
	 * or dispatching. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	pchnl->dw_state |= CHNL_STATECANCEL;
	if (LST_IS_EMPTY(pchnl->pio_requests))
		goto func_cont;

	if (pchnl->chnl_type == CHNL_PCPY) {
		/* Indicate we have no more buffers available for transfer: */
		if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
			io_cancel_chnl(chnl_mgr_obj->hio_mgr, chnl_id);
		} else {
			/* Record that we no longer have output buffers
			 * available: */
			chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
		}
	}
	/* Move all IOR's to IOC queue: */
	while (!LST_IS_EMPTY(pchnl->pio_requests)) {
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
		if (chnl_packet_obj) {
			chnl_packet_obj->byte_size = 0;
			chnl_packet_obj->status |= CHNL_IOCSTATCANCEL;
			lst_put_tail(pchnl->pio_completions,
				     (struct list_head *)chnl_packet_obj);
			pchnl->cio_cs++;
			pchnl->cio_reqs--;
			DBC_ASSERT(pchnl->cio_reqs >= 0);
		}
	}
func_cont:
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
func_end:
	return status;
}

/*
 * ======== bridge_chnl_close ========
 * Purpose:
 * Ensures all pending I/O on this channel is cancelled, discards all
 * queued I/O completion notifications, then frees the resources allocated
 * for this channel, and makes the corresponding logical channel id
 * available for subsequent use.
 */
int bridge_chnl_close(struct chnl_object *chnl_obj)
{
	int status;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

	/* Check args: */
	if (!pchnl) {
		status = -EFAULT;
		goto func_cont;
	}
	{
		/* Cancel IO: this ensures no further IO requests or
		 * notifications. */
		status = bridge_chnl_cancel_io(chnl_obj);
	}
func_cont:
	if (DSP_SUCCEEDED(status)) {
		/* Assert I/O on this channel is now cancelled: Protects
		 * from io_dpc. */
		DBC_ASSERT((pchnl->dw_state & CHNL_STATECANCEL));
		/* Invalidate channel object: Protects from
		 * CHNL_GetIOCompletion(). */
		/* Free the slot in the channel manager: */
		pchnl->chnl_mgr_obj->ap_channel[pchnl->chnl_id] = NULL;
		spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
		pchnl->chnl_mgr_obj->open_channels -= 1;
		spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
		if (pchnl->ntfy_obj) {
			ntfy_delete(pchnl->ntfy_obj);
			kfree(pchnl->ntfy_obj);
			pchnl->ntfy_obj = NULL;
		}
		/* Reset channel event: (NOTE: user_event freed in user
		 * context.). */
		if (pchnl->sync_event) {
			sync_reset_event(pchnl->sync_event);
			kfree(pchnl->sync_event);
			pchnl->sync_event = NULL;
		}
		/* Free I/O request and I/O completion queues: */
		if (pchnl->pio_completions) {
			free_chirp_list(pchnl->pio_completions);
			pchnl->pio_completions = NULL;
			pchnl->cio_cs = 0;
		}
		if (pchnl->pio_requests) {
			free_chirp_list(pchnl->pio_requests);
			pchnl->pio_requests = NULL;
			pchnl->cio_reqs = 0;
		}
		if (pchnl->free_packets_list) {
			free_chirp_list(pchnl->free_packets_list);
			pchnl->free_packets_list = NULL;
		}
		/* Release channel object. */
		kfree(pchnl);
		pchnl = NULL;
	}
	DBC_ENSURE(DSP_FAILED(status) || !pchnl);
	return status;
}

/*
 * ======== bridge_chnl_create ========
 * Create a channel manager object, responsible for opening new channels
 * and closing old ones for a given board.
 */
int bridge_chnl_create(OUT struct chnl_mgr **channel_mgr,
		       struct dev_object *hdev_obj,
		       IN CONST struct chnl_mgrattrs *mgr_attrts)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = NULL;
	u8 max_channels;

	/* Check DBC requirements: */
	DBC_REQUIRE(channel_mgr != NULL);
	DBC_REQUIRE(mgr_attrts != NULL);
	DBC_REQUIRE(mgr_attrts->max_channels > 0);
	DBC_REQUIRE(mgr_attrts->max_channels <= CHNL_MAXCHANNELS);
	DBC_REQUIRE(mgr_attrts->word_size != 0);

	/* Allocate channel manager object */
	chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
	if (chnl_mgr_obj) {
		/*
		 * The max_channels attr must equal the # of supported chnls
		 * for each transport (# chnls for PCPY = DDMA = ZCPY): i.e.
		 * mgr_attrts->max_channels = CHNL_MAXCHANNELS =
		 * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
		 */
		DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
		max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
		/* Create array of channels */
		chnl_mgr_obj->ap_channel = kzalloc(sizeof(struct chnl_object *)
						   * max_channels, GFP_KERNEL);
		if (chnl_mgr_obj->ap_channel) {
			/* Initialize chnl_mgr object */
			chnl_mgr_obj->dw_type = CHNL_TYPESM;
			chnl_mgr_obj->word_size = mgr_attrts->word_size;
			/* Total # chnls supported */
			chnl_mgr_obj->max_channels = max_channels;
			chnl_mgr_obj->open_channels = 0;
			chnl_mgr_obj->dw_output_mask = 0;
			chnl_mgr_obj->dw_last_output = 0;
			chnl_mgr_obj->hdev_obj = hdev_obj;
			if (DSP_SUCCEEDED(status))
				spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
		} else {
			status = -ENOMEM;
		}
	} else {
		status = -ENOMEM;
	}

	if (DSP_FAILED(status)) {
		bridge_chnl_destroy(chnl_mgr_obj);
		*channel_mgr = NULL;
	} else {
		/* Return channel manager object to caller... */
		*channel_mgr = chnl_mgr_obj;
	}
	return status;
}
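
/*
 * Hypothetical bring-up sketch: the attribute values mirror the DBC
 * requirements checked above (max_channels bounded by CHNL_MAXCHANNELS,
 * non-zero word_size). The function name, device handle, and the
 * 16-bit word size are assumptions for illustration.
 */
#if 0	/* illustrative only, never built */
static int example_create_mgr(struct dev_object *hdev,
			      struct chnl_mgr **mgr)
{
	struct chnl_mgrattrs attrs = {
		.max_channels = CHNL_MAXCHANNELS,
		.word_size = 2,		/* assumed 16-bit DSP words */
	};

	return bridge_chnl_create(mgr, hdev, &attrs);
}
#endif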

/*
 * ======== bridge_chnl_destroy ========
 * Purpose:
 * Close all open channels, and destroy the channel manager.
 */
int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	u32 chnl_id;

	if (hchnl_mgr) {
		/* Close all open channels: */
		for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
		     chnl_id++) {
			status =
			    bridge_chnl_close(chnl_mgr_obj->ap_channel
					      [chnl_id]);
			if (DSP_FAILED(status))
				dev_dbg(bridge, "%s: Error status 0x%x\n",
					__func__, status);
		}

		/* Free channel manager object: */
		kfree(chnl_mgr_obj->ap_channel);

		/* Set hchnl_mgr to NULL in device object. */
		dev_set_chnl_mgr(chnl_mgr_obj->hdev_obj, NULL);
		/* Free this Chnl Mgr object: */
		kfree(hchnl_mgr);
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 * ======== bridge_chnl_flush_io ========
 * Purpose:
 * Flushes all the outstanding data requests on a channel.
 */
int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	s8 chnl_mode = -1;
	struct chnl_mgr *chnl_mgr_obj;
	struct chnl_ioc chnl_ioc_obj;

	/* Check args: */
	if (pchnl) {
		if ((timeout == CHNL_IOCNOWAIT)
		    && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = -EINVAL;
		} else {
			chnl_mode = pchnl->chnl_mode;
			chnl_mgr_obj = pchnl->chnl_mgr_obj;
		}
	} else {
		status = -EFAULT;
	}
	if (DSP_SUCCEEDED(status)) {
		/* Note: Currently, if another thread continues to add IO
		 * requests to this channel, this function will continue to
		 * flush all such queued IO requests. */
		if (CHNL_IS_OUTPUT(chnl_mode)
		    && (pchnl->chnl_type == CHNL_PCPY)) {
			/* Wait for IO completions, up to the specified
			 * timeout: */
			while (!LST_IS_EMPTY(pchnl->pio_requests) &&
			       DSP_SUCCEEDED(status)) {
				status = bridge_chnl_get_ioc(chnl_obj,
						timeout, &chnl_ioc_obj);
				if (DSP_FAILED(status))
					continue;

				if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT)
					status = -ETIMEDOUT;

			}
		} else {
			status = bridge_chnl_cancel_io(chnl_obj);
			/* Now, leave the channel in the ready state: */
			pchnl->dw_state &= ~CHNL_STATECANCEL;
		}
	}
	DBC_ENSURE(DSP_FAILED(status) || LST_IS_EMPTY(pchnl->pio_requests));
	return status;
}
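
/*
 * Sketch of the teardown ordering this function supports: drain (or
 * cancel) outstanding requests, then close. The function name, the
 * timeout value, and the millisecond unit are assumptions for
 * illustration.
 */
#if 0	/* illustrative only, never built */
static int example_drain_and_close(struct chnl_object *chnl)
{
	int status;

	status = bridge_chnl_flush_io(chnl, 5000);	/* assumed ms */
	if (DSP_SUCCEEDED(status))
		status = bridge_chnl_close(chnl);

	return status;
}
#endif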

/*
 * ======== bridge_chnl_get_info ========
 * Purpose:
 * Retrieve information related to a channel.
 */
int bridge_chnl_get_info(struct chnl_object *chnl_obj,
			 OUT struct chnl_info *channel_info)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;

	if (channel_info != NULL) {
		if (pchnl) {
			/* Return the requested information: */
			channel_info->hchnl_mgr = pchnl->chnl_mgr_obj;
			channel_info->event_obj = pchnl->user_event;
			channel_info->cnhl_id = pchnl->chnl_id;
			channel_info->dw_mode = pchnl->chnl_mode;
			channel_info->bytes_tx = pchnl->bytes_moved;
			channel_info->process = pchnl->process;
			channel_info->sync_event = pchnl->sync_event;
			channel_info->cio_cs = pchnl->cio_cs;
			channel_info->cio_reqs = pchnl->cio_reqs;
			channel_info->dw_state = pchnl->dw_state;
		} else {
			status = -EFAULT;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 * ======== bridge_chnl_get_ioc ========
 * Optionally wait for I/O completion on a channel. Dequeue an I/O
 * completion record, which contains information about the completed
 * I/O request.
 * Note: Ensures Channel Invariant (see notes above).
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
			OUT struct chnl_ioc *chan_ioc)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (!chan_ioc || !pchnl) {
		status = -EFAULT;
	} else if (timeout == CHNL_IOCNOWAIT) {
		if (LST_IS_EMPTY(pchnl->pio_completions))
			status = -EREMOTEIO;

	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (DSP_FAILED(status))
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (timeout !=
	    CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
		if (timeout == CHNL_IOCINFINITE)
			timeout = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (LST_IS_EMPTY(pchnl->pio_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set chan_ioc; */
		DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
		/* Update chan_ioc from channel state and chirp: */
		if (chnl_packet_obj) {
			pchnl->cio_cs--;
			/* If this is a zero-copy channel, then set IOC's pbuf
			 * to the DSP's address. This DSP address will get
			 * translated to user's virtual addr later. */
			{
				host_sys_buf = chnl_packet_obj->host_sys_buf;
				ioc.pbuf = chnl_packet_obj->host_user_buf;
			}
			ioc.byte_size = chnl_packet_obj->byte_size;
			ioc.buf_size = chnl_packet_obj->buf_size;
			ioc.dw_arg = chnl_packet_obj->dw_arg;
			ioc.status |= chnl_packet_obj->status;
			/* Place the used chirp on the free list: */
			lst_put_tail(pchnl->free_packets_list,
				     (struct list_head *)chnl_packet_obj);
		} else {
			ioc.pbuf = NULL;
			ioc.byte_size = 0;
		}
	} else {
		ioc.pbuf = NULL;
		ioc.byte_size = 0;
		ioc.dw_arg = 0;
		ioc.buf_size = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!LST_IS_EMPTY(pchnl->pio_completions)) {
		/* Since DSPStream_Reclaim() does not take a timeout
		 * parameter, we pass the stream's timeout value to
		 * bridge_chnl_get_ioc. We cannot determine whether or not
		 * we have waited in User mode. Since the stream's timeout
		 * value may be non-zero, we still have to set the event.
		 * Therefore, this optimization is taken out.
		 *
		 * if (timeout == CHNL_IOCNOWAIT) {
		 *	... ensure event is set..
		 *	sync_set_event(pchnl->sync_event);
		 * } */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		if (!(ioc.pbuf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.pbuf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/* host_user_buf */
		status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
		if (status) {
			if (current->flags & PF_EXITING)
				status = 0;
		}
		if (status)
			status = -EFAULT;
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*chan_ioc = ioc;
func_end:
	return status;
}
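
/*
 * Hypothetical reclaim loop: block for each completion and stop on
 * end-of-stream or cancel. The flag tests reuse the CHNL_IOCSTAT*
 * values set above; the function name is an assumption for
 * illustration.
 */
#if 0	/* illustrative only, never built */
static int example_reclaim_all(struct chnl_object *chnl)
{
	struct chnl_ioc ioc;
	int status;

	do {
		status = bridge_chnl_get_ioc(chnl, CHNL_IOCINFINITE, &ioc);
		if (DSP_FAILED(status))
			break;
		/* consume ioc.pbuf / ioc.byte_size here */
	} while (!(ioc.status & (CHNL_IOCSTATEOS | CHNL_IOCSTATCANCEL)));

	return status;
}
#endif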

/*
 * ======== bridge_chnl_get_mgr_info ========
 * Retrieve information related to the channel manager.
 */
int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
			     OUT struct chnl_mgrinfo *mgr_info)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;

	if (mgr_info != NULL) {
		if (ch_id <= CHNL_MAXCHANNELS) {
			if (hchnl_mgr) {
				/* Return the requested information: */
				mgr_info->chnl_obj =
				    chnl_mgr_obj->ap_channel[ch_id];
				mgr_info->open_channels =
				    chnl_mgr_obj->open_channels;
				mgr_info->dw_type = chnl_mgr_obj->dw_type;
				/* total # of chnls */
				mgr_info->max_channels =
				    chnl_mgr_obj->max_channels;
			} else {
				status = -EFAULT;
			}
		} else {
			status = -ECHRNG;
		}
	} else {
		status = -EFAULT;
	}

	return status;
}

/*
 * ======== bridge_chnl_idle ========
 * Idles a particular channel.
 */
int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
		     bool flush_data)
{
	s8 chnl_mode;
	struct chnl_mgr *chnl_mgr_obj;
	int status = 0;

	DBC_REQUIRE(chnl_obj);

	chnl_mode = chnl_obj->chnl_mode;
	chnl_mgr_obj = chnl_obj->chnl_mgr_obj;

	if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) {
		/* Wait for IO completions, up to the specified timeout: */
		status = bridge_chnl_flush_io(chnl_obj, timeout);
	} else {
		status = bridge_chnl_cancel_io(chnl_obj);

		/* Reset the byte count and put channel back in ready state. */
		chnl_obj->bytes_moved = 0;
		chnl_obj->dw_state &= ~CHNL_STATECANCEL;
	}

	return status;
}

/*
 * ======== bridge_chnl_open ========
 * Open a new half-duplex channel to the DSP board.
 */
int bridge_chnl_open(OUT struct chnl_object **chnl,
		     struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
		     u32 ch_id, CONST IN struct chnl_attr *pattrs)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	struct chnl_object *pchnl = NULL;
	struct sync_object *sync_event = NULL;

	/* Ensure DBC requirements: */
	DBC_REQUIRE(chnl != NULL);
	DBC_REQUIRE(pattrs != NULL);
	DBC_REQUIRE(hchnl_mgr != NULL);
	*chnl = NULL;

	/* Validate args: */
	if (pattrs->uio_reqs == 0) {
		status = -EINVAL;
	} else {
		if (!hchnl_mgr) {
			status = -EFAULT;
		} else {
			if (ch_id != CHNL_PICKFREE) {
				if (ch_id >= chnl_mgr_obj->max_channels)
					status = -ECHRNG;
				else if (chnl_mgr_obj->ap_channel[ch_id] !=
					 NULL)
					status = -EALREADY;
			} else {
				/* Check for free channel */
				status =
				    search_free_channel(chnl_mgr_obj, &ch_id);
			}
		}
	}
	if (DSP_FAILED(status))
		goto func_end;

	DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);
	/* Create channel object: */
	pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
	if (!pchnl) {
		status = -ENOMEM;
		goto func_end;
	}
	/* Protect queues from io_dpc: */
	pchnl->dw_state = CHNL_STATECANCEL;
	/* Allocate initial IOR and IOC queues: */
	pchnl->free_packets_list = create_chirp_list(pattrs->uio_reqs);
	pchnl->pio_requests = create_chirp_list(0);
	pchnl->pio_completions = create_chirp_list(0);
	pchnl->chnl_packets = pattrs->uio_reqs;
	pchnl->cio_cs = 0;
	pchnl->cio_reqs = 0;
	sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (sync_event)
		sync_init_event(sync_event);
	else
		status = -ENOMEM;

	if (DSP_SUCCEEDED(status)) {
		pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
					  GFP_KERNEL);
		if (pchnl->ntfy_obj)
			ntfy_init(pchnl->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (DSP_SUCCEEDED(status)) {
		if (pchnl->pio_completions && pchnl->pio_requests &&
		    pchnl->free_packets_list) {
			/* Initialize CHNL object fields: */
			pchnl->chnl_mgr_obj = chnl_mgr_obj;
			pchnl->chnl_id = ch_id;
			pchnl->chnl_mode = chnl_mode;
			pchnl->user_event = sync_event;
			pchnl->sync_event = sync_event;
			/* Get the process handle */
			pchnl->process = current->tgid;
			pchnl->pcb_arg = 0;
			pchnl->bytes_moved = 0;
			/* Default to proc-copy */
			pchnl->chnl_type = CHNL_PCPY;
		} else {
			status = -ENOMEM;
		}
	}

	if (DSP_FAILED(status)) {
		/* Free memory */
		if (pchnl->pio_completions) {
			free_chirp_list(pchnl->pio_completions);
			pchnl->pio_completions = NULL;
			pchnl->cio_cs = 0;
		}
		if (pchnl->pio_requests) {
			free_chirp_list(pchnl->pio_requests);
			pchnl->pio_requests = NULL;
		}
		if (pchnl->free_packets_list) {
			free_chirp_list(pchnl->free_packets_list);
			pchnl->free_packets_list = NULL;
		}
		kfree(sync_event);
		sync_event = NULL;

		if (pchnl->ntfy_obj) {
			ntfy_delete(pchnl->ntfy_obj);
			kfree(pchnl->ntfy_obj);
			pchnl->ntfy_obj = NULL;
		}
		kfree(pchnl);
	} else {
		/* Insert channel object in channel manager: */
		chnl_mgr_obj->ap_channel[pchnl->chnl_id] = pchnl;
		spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
		chnl_mgr_obj->open_channels++;
		spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
		/* Return result... */
		pchnl->dw_state = CHNL_STATEREADY;
		*chnl = pchnl;
	}
func_end:
	DBC_ENSURE((DSP_SUCCEEDED(status) && pchnl) || (*chnl == NULL));
	return status;
}
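
/*
 * Hypothetical open sketch: pick any free slot with CHNL_PICKFREE and
 * request a queue depth of four I/O requests. CHNL_MODETODSP is assumed
 * from the mode constants tested by CHNL_IS_INPUT()/CHNL_IS_OUTPUT();
 * the function name and depth are assumptions for illustration.
 */
#if 0	/* illustrative only, never built */
static int example_open_to_dsp(struct chnl_mgr *mgr,
			       struct chnl_object **chnl)
{
	struct chnl_attr attrs = { .uio_reqs = 4 };

	return bridge_chnl_open(chnl, mgr, CHNL_MODETODSP,
				CHNL_PICKFREE, &attrs);
}
#endif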

/*
 * ======== bridge_chnl_register_notify ========
 * Registers for events on a particular channel.
 */
int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
				u32 event_mask, u32 notify_type,
				struct dsp_notification *hnotification)
{
	int status = 0;

	DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));

	if (event_mask)
		status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
				       event_mask, notify_type);
	else
		status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification);

	return status;
}
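
/*
 * Hypothetical registration sketch for both stream events. Only the
 * event-mask bits appear in the assertion above, so the DSP_SIGNALEVENT
 * notify type and the function name are assumptions for illustration.
 */
#if 0	/* illustrative only, never built */
static int example_watch_stream(struct chnl_object *chnl,
				struct dsp_notification *notification)
{
	return bridge_chnl_register_notify(chnl,
					   DSP_STREAMDONE |
					   DSP_STREAMIOCOMPLETION,
					   DSP_SIGNALEVENT, notification);
}
#endif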

/*
 * ======== create_chirp_list ========
 * Purpose:
 * Initialize a queue of channel I/O Request/Completion packets.
 * Parameters:
 * chirps: Number of Chirps to allocate.
 * Returns:
 * Pointer to queue of IRPs, or NULL.
 * Requires:
 * Ensures:
 */
static struct lst_list *create_chirp_list(u32 chirps)
{
	struct lst_list *chirp_list;
	struct chnl_irp *chnl_packet_obj;
	u32 i;

	chirp_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);

	if (chirp_list) {
		INIT_LIST_HEAD(&chirp_list->head);
		/* Make N chirps and place on queue. */
		for (i = 0; (i < chirps)
		     && ((chnl_packet_obj = make_new_chirp()) != NULL); i++) {
			lst_put_tail(chirp_list,
				     (struct list_head *)chnl_packet_obj);
		}

		/* If we couldn't allocate all chirps, free those allocated: */
		if (i != chirps) {
			free_chirp_list(chirp_list);
			chirp_list = NULL;
		}
	}

	return chirp_list;
}

/*
 * ======== free_chirp_list ========
 * Purpose:
 * Free the queue of Chirps.
 */
static void free_chirp_list(struct lst_list *chirp_list)
{
	DBC_REQUIRE(chirp_list != NULL);

	while (!LST_IS_EMPTY(chirp_list))
		kfree(lst_get_head(chirp_list));

	kfree(chirp_list);
}

/*
 * ======== make_new_chirp ========
 * Allocate the memory for a new channel IRP.
 */
static struct chnl_irp *make_new_chirp(void)
{
	struct chnl_irp *chnl_packet_obj;

	chnl_packet_obj = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
	if (chnl_packet_obj != NULL) {
		/* lst_init_elem only resets the list's member values. */
		lst_init_elem(&chnl_packet_obj->link);
	}

	return chnl_packet_obj;
}

/*
 * ======== search_free_channel ========
 * Search for a free channel slot in the array of channel pointers.
 */
static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
			       OUT u32 *chnl)
{
	int status = -ENOSR;
	u32 i;

	DBC_REQUIRE(chnl_mgr_obj);

	for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
		if (chnl_mgr_obj->ap_channel[i] == NULL) {
			status = 0;
			*chnl = i;
			break;
		}
	}

	return status;
}