4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * Implements upper edge functions for Bridge driver channel module.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20 * The lower edge functions must be implemented by the Bridge driver
21 * writer, and are declared in chnl_sm.h.
23 * Care is taken in this code to prevent simultaneous access to channel
26 * 2. io_dpc(), scheduled from the io_isr() as an event.
28 * This is done primarily by:
30 * - state flags in the channel object; and
31 * - ensuring the IO_Dispatch() routine, which is called from both
32 * CHNL_AddIOReq() and the DPC(if implemented), is not re-entered.
35 * There is an important invariant condition which must be maintained per
36 * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
37 * which may cause timeouts and/or failure of function sync_wait_on_event.
38 * This invariant condition is:
40 * LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is reset
42 * !LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is set.
45 /* ----------------------------------- OS */
46 #include <dspbridge/host_os.h>
48 /* ----------------------------------- DSP/BIOS Bridge */
49 #include <dspbridge/std.h>
50 #include <dspbridge/dbdefs.h>
52 /* ----------------------------------- Trace & Debug */
53 #include <dspbridge/dbc.h>
55 /* ----------------------------------- OS Adaptation Layer */
56 #include <dspbridge/cfg.h>
57 #include <dspbridge/sync.h>
59 /* ----------------------------------- Bridge Driver */
60 #include <dspbridge/dspdefs.h>
61 #include <dspbridge/dspchnl.h>
64 /* ----------------------------------- Platform Manager */
65 #include <dspbridge/dev.h>
67 /* ----------------------------------- Others */
68 #include <dspbridge/io_sm.h>
70 /* ----------------------------------- Define for This */
71 #define USERMODE_ADDR PAGE_OFFSET
73 #define MAILBOX_IRQ INT_MAIL_MPU_IRQ
75 /* ----------------------------------- Function Prototypes */
76 static struct lst_list
*create_chirp_list(u32 chirps
);
78 static void free_chirp_list(struct lst_list
*lst
);
80 static struct chnl_irp
*make_new_chirp(void);
82 static int search_free_channel(struct chnl_mgr
*chnl_mgr_obj
,
86 * ======== bridge_chnl_add_io_req ========
87 * Enqueue an I/O request for data transfer on a channel to the DSP.
88 * The direction (mode) is specified in the channel object. Note the DSP
89 * address is specified for channels opened in direct I/O mode.
91 int bridge_chnl_add_io_req(struct chnl_object
*chnl_obj
, void *host_buf
,
92 u32 byte_size
, u32 buf_size
,
93 OPTIONAL u32 dw_dsp_addr
, u32 dw_arg
)
96 struct chnl_object
*pchnl
= (struct chnl_object
*)chnl_obj
;
97 struct chnl_irp
*chnl_packet_obj
= NULL
;
98 struct bridge_dev_context
*dev_ctxt
;
99 struct dev_object
*dev_obj
;
102 struct chnl_mgr
*chnl_mgr_obj
= pchnl
->chnl_mgr_obj
;
103 u8
*host_sys_buf
= NULL
;
104 bool sched_dpc
= false;
107 is_eos
= (byte_size
== 0);
110 if (!host_buf
|| !pchnl
) {
112 } else if (is_eos
&& CHNL_IS_INPUT(pchnl
->chnl_mode
)) {
116 * Check the channel state: only queue chirp if channel state
119 dw_state
= pchnl
->dw_state
;
120 if (dw_state
!= CHNL_STATEREADY
) {
121 if (dw_state
& CHNL_STATECANCEL
)
123 else if ((dw_state
& CHNL_STATEEOS
) &&
124 CHNL_IS_OUTPUT(pchnl
->chnl_mode
))
127 /* No other possible states left */
132 dev_obj
= dev_get_first();
133 dev_get_bridge_context(dev_obj
, &dev_ctxt
);
137 if (DSP_FAILED(status
))
140 if (pchnl
->chnl_type
== CHNL_PCPY
&& pchnl
->chnl_id
> 1 && host_buf
) {
141 if (!(host_buf
< (void *)USERMODE_ADDR
)) {
142 host_sys_buf
= host_buf
;
145 /* if addr in user mode, then copy to kernel space */
146 host_sys_buf
= kmalloc(buf_size
, GFP_KERNEL
);
147 if (host_sys_buf
== NULL
) {
151 if (CHNL_IS_OUTPUT(pchnl
->chnl_mode
)) {
152 status
= copy_from_user(host_sys_buf
, host_buf
,
163 /* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
164 * channels. DPCCS is held to avoid race conditions with PCPY channels.
165 * If DPC is scheduled in process context (iosm_schedule) and any
166 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
167 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
168 spin_lock_bh(&chnl_mgr_obj
->chnl_mgr_lock
);
169 omap_mbox_disable_irq(dev_ctxt
->mbox
, IRQ_RX
);
170 if (pchnl
->chnl_type
== CHNL_PCPY
) {
171 /* This is a processor-copy channel. */
172 if (DSP_SUCCEEDED(status
) && CHNL_IS_OUTPUT(pchnl
->chnl_mode
)) {
173 /* Check buffer size on output channels for fit. */
175 io_buf_size(pchnl
->chnl_mgr_obj
->hio_mgr
))
180 if (DSP_SUCCEEDED(status
)) {
181 /* Get a free chirp: */
183 (struct chnl_irp
*)lst_get_head(pchnl
->free_packets_list
);
184 if (chnl_packet_obj
== NULL
)
188 if (DSP_SUCCEEDED(status
)) {
189 /* Enqueue the chirp on the chnl's IORequest queue: */
190 chnl_packet_obj
->host_user_buf
= chnl_packet_obj
->host_sys_buf
=
192 if (pchnl
->chnl_type
== CHNL_PCPY
&& pchnl
->chnl_id
> 1)
193 chnl_packet_obj
->host_sys_buf
= host_sys_buf
;
196 * Note: for dma chans dw_dsp_addr contains dsp address
199 DBC_ASSERT(chnl_mgr_obj
->word_size
!= 0);
201 chnl_packet_obj
->dsp_tx_addr
=
202 dw_dsp_addr
/ chnl_mgr_obj
->word_size
;
203 chnl_packet_obj
->byte_size
= byte_size
;
204 chnl_packet_obj
->buf_size
= buf_size
;
205 /* Only valid for output channel */
206 chnl_packet_obj
->dw_arg
= dw_arg
;
207 chnl_packet_obj
->status
= (is_eos
? CHNL_IOCSTATEOS
:
208 CHNL_IOCSTATCOMPLETE
);
209 lst_put_tail(pchnl
->pio_requests
,
210 (struct list_head
*)chnl_packet_obj
);
212 DBC_ASSERT(pchnl
->cio_reqs
<= pchnl
->chnl_packets
);
214 * If end of stream, update the channel state to prevent
218 pchnl
->dw_state
|= CHNL_STATEEOS
;
220 /* Legacy DSM Processor-Copy */
221 DBC_ASSERT(pchnl
->chnl_type
== CHNL_PCPY
);
222 /* Request IO from the DSP */
223 io_request_chnl(chnl_mgr_obj
->hio_mgr
, pchnl
,
224 (CHNL_IS_INPUT(pchnl
->chnl_mode
) ? IO_INPUT
:
225 IO_OUTPUT
), &mb_val
);
229 omap_mbox_enable_irq(dev_ctxt
->mbox
, IRQ_RX
);
230 spin_unlock_bh(&chnl_mgr_obj
->chnl_mgr_lock
);
232 io_intr_dsp2(chnl_mgr_obj
->hio_mgr
, mb_val
);
234 /* Schedule a DPC, to do the actual data transfer */
236 iosm_schedule(chnl_mgr_obj
->hio_mgr
);
243 * ======== bridge_chnl_cancel_io ========
244 * Return all I/O requests to the client which have not yet been
245 * transferred. The channel's I/O completion object is
246 * signalled, and all the I/O requests are queued as IOC's, with the
247 * status field set to CHNL_IOCSTATCANCEL.
248 * This call is typically used in abort situations, and is a prelude to
251 int bridge_chnl_cancel_io(struct chnl_object
*chnl_obj
)
254 struct chnl_object
*pchnl
= (struct chnl_object
*)chnl_obj
;
257 struct chnl_irp
*chnl_packet_obj
;
258 struct chnl_mgr
*chnl_mgr_obj
= NULL
;
261 if (pchnl
&& pchnl
->chnl_mgr_obj
) {
262 chnl_id
= pchnl
->chnl_id
;
263 chnl_mode
= pchnl
->chnl_mode
;
264 chnl_mgr_obj
= pchnl
->chnl_mgr_obj
;
268 if (DSP_FAILED(status
))
271 /* Mark this channel as cancelled, to prevent further IORequests or
272 * IORequests or dispatching. */
273 spin_lock_bh(&chnl_mgr_obj
->chnl_mgr_lock
);
274 pchnl
->dw_state
|= CHNL_STATECANCEL
;
275 if (LST_IS_EMPTY(pchnl
->pio_requests
))
278 if (pchnl
->chnl_type
== CHNL_PCPY
) {
279 /* Indicate we have no more buffers available for transfer: */
280 if (CHNL_IS_INPUT(pchnl
->chnl_mode
)) {
281 io_cancel_chnl(chnl_mgr_obj
->hio_mgr
, chnl_id
);
283 /* Record that we no longer have output buffers
285 chnl_mgr_obj
->dw_output_mask
&= ~(1 << chnl_id
);
288 /* Move all IOR's to IOC queue: */
289 while (!LST_IS_EMPTY(pchnl
->pio_requests
)) {
291 (struct chnl_irp
*)lst_get_head(pchnl
->pio_requests
);
292 if (chnl_packet_obj
) {
293 chnl_packet_obj
->byte_size
= 0;
294 chnl_packet_obj
->status
|= CHNL_IOCSTATCANCEL
;
295 lst_put_tail(pchnl
->pio_completions
,
296 (struct list_head
*)chnl_packet_obj
);
299 DBC_ASSERT(pchnl
->cio_reqs
>= 0);
303 spin_unlock_bh(&chnl_mgr_obj
->chnl_mgr_lock
);
309 * ======== bridge_chnl_close ========
311 * Ensures all pending I/O on this channel is cancelled, discards all
312 * queued I/O completion notifications, then frees the resources allocated
313 * for this channel, and makes the corresponding logical channel id
314 * available for subsequent use.
316 int bridge_chnl_close(struct chnl_object
*chnl_obj
)
319 struct chnl_object
*pchnl
= (struct chnl_object
*)chnl_obj
;
327 /* Cancel IO: this ensures no further IO requests or
329 status
= bridge_chnl_cancel_io(chnl_obj
);
332 if (DSP_SUCCEEDED(status
)) {
333 /* Assert I/O on this channel is now cancelled: Protects
335 DBC_ASSERT((pchnl
->dw_state
& CHNL_STATECANCEL
));
336 /* Invalidate channel object: Protects from
337 * CHNL_GetIOCompletion(). */
338 /* Free the slot in the channel manager: */
339 pchnl
->chnl_mgr_obj
->ap_channel
[pchnl
->chnl_id
] = NULL
;
340 spin_lock_bh(&pchnl
->chnl_mgr_obj
->chnl_mgr_lock
);
341 pchnl
->chnl_mgr_obj
->open_channels
-= 1;
342 spin_unlock_bh(&pchnl
->chnl_mgr_obj
->chnl_mgr_lock
);
343 if (pchnl
->ntfy_obj
) {
344 ntfy_delete(pchnl
->ntfy_obj
);
345 kfree(pchnl
->ntfy_obj
);
346 pchnl
->ntfy_obj
= NULL
;
348 /* Reset channel event: (NOTE: user_event freed in user
350 if (pchnl
->sync_event
) {
351 sync_reset_event(pchnl
->sync_event
);
352 kfree(pchnl
->sync_event
);
353 pchnl
->sync_event
= NULL
;
355 /* Free I/O request and I/O completion queues: */
356 if (pchnl
->pio_completions
) {
357 free_chirp_list(pchnl
->pio_completions
);
358 pchnl
->pio_completions
= NULL
;
361 if (pchnl
->pio_requests
) {
362 free_chirp_list(pchnl
->pio_requests
);
363 pchnl
->pio_requests
= NULL
;
366 if (pchnl
->free_packets_list
) {
367 free_chirp_list(pchnl
->free_packets_list
);
368 pchnl
->free_packets_list
= NULL
;
370 /* Release channel object. */
374 DBC_ENSURE(DSP_FAILED(status
) || !pchnl
);
379 * ======== bridge_chnl_create ========
380 * Create a channel manager object, responsible for opening new channels
381 * and closing old ones for a given board.
383 int bridge_chnl_create(OUT
struct chnl_mgr
**channel_mgr
,
384 struct dev_object
*hdev_obj
,
385 IN CONST
struct chnl_mgrattrs
*mgr_attrts
)
388 struct chnl_mgr
*chnl_mgr_obj
= NULL
;
391 /* Check DBC requirements: */
392 DBC_REQUIRE(channel_mgr
!= NULL
);
393 DBC_REQUIRE(mgr_attrts
!= NULL
);
394 DBC_REQUIRE(mgr_attrts
->max_channels
> 0);
395 DBC_REQUIRE(mgr_attrts
->max_channels
<= CHNL_MAXCHANNELS
);
396 DBC_REQUIRE(mgr_attrts
->word_size
!= 0);
398 /* Allocate channel manager object */
399 chnl_mgr_obj
= kzalloc(sizeof(struct chnl_mgr
), GFP_KERNEL
);
402 * The max_channels attr must equal the # of supported chnls for
403 * each transport(# chnls for PCPY = DDMA = ZCPY): i.e.
404 * mgr_attrts->max_channels = CHNL_MAXCHANNELS =
405 * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
407 DBC_ASSERT(mgr_attrts
->max_channels
== CHNL_MAXCHANNELS
);
408 max_channels
= CHNL_MAXCHANNELS
+ CHNL_MAXCHANNELS
* CHNL_PCPY
;
409 /* Create array of channels */
410 chnl_mgr_obj
->ap_channel
= kzalloc(sizeof(struct chnl_object
*)
411 * max_channels
, GFP_KERNEL
);
412 if (chnl_mgr_obj
->ap_channel
) {
413 /* Initialize chnl_mgr object */
414 chnl_mgr_obj
->dw_type
= CHNL_TYPESM
;
415 chnl_mgr_obj
->word_size
= mgr_attrts
->word_size
;
416 /* Total # chnls supported */
417 chnl_mgr_obj
->max_channels
= max_channels
;
418 chnl_mgr_obj
->open_channels
= 0;
419 chnl_mgr_obj
->dw_output_mask
= 0;
420 chnl_mgr_obj
->dw_last_output
= 0;
421 chnl_mgr_obj
->hdev_obj
= hdev_obj
;
422 if (DSP_SUCCEEDED(status
))
423 spin_lock_init(&chnl_mgr_obj
->chnl_mgr_lock
);
431 if (DSP_FAILED(status
)) {
432 bridge_chnl_destroy(chnl_mgr_obj
);
435 /* Return channel manager object to caller... */
436 *channel_mgr
= chnl_mgr_obj
;
442 * ======== bridge_chnl_destroy ========
444 * Close all open channels, and destroy the channel manager.
446 int bridge_chnl_destroy(struct chnl_mgr
*hchnl_mgr
)
449 struct chnl_mgr
*chnl_mgr_obj
= hchnl_mgr
;
453 /* Close all open channels: */
454 for (chnl_id
= 0; chnl_id
< chnl_mgr_obj
->max_channels
;
457 bridge_chnl_close(chnl_mgr_obj
->ap_channel
459 if (DSP_FAILED(status
))
460 dev_dbg(bridge
, "%s: Error status 0x%x\n",
464 /* Free channel manager object: */
465 kfree(chnl_mgr_obj
->ap_channel
);
467 /* Set hchnl_mgr to NULL in device object. */
468 dev_set_chnl_mgr(chnl_mgr_obj
->hdev_obj
, NULL
);
469 /* Free this Chnl Mgr object: */
478 * ======== bridge_chnl_flush_io ========
480 * Flushes all the outstanding data requests on a channel.
482 int bridge_chnl_flush_io(struct chnl_object
*chnl_obj
, u32 timeout
)
485 struct chnl_object
*pchnl
= (struct chnl_object
*)chnl_obj
;
487 struct chnl_mgr
*chnl_mgr_obj
;
488 struct chnl_ioc chnl_ioc_obj
;
491 if ((timeout
== CHNL_IOCNOWAIT
)
492 && CHNL_IS_OUTPUT(pchnl
->chnl_mode
)) {
495 chnl_mode
= pchnl
->chnl_mode
;
496 chnl_mgr_obj
= pchnl
->chnl_mgr_obj
;
501 if (DSP_SUCCEEDED(status
)) {
502 /* Note: Currently, if another thread continues to add IO
503 * requests to this channel, this function will continue to
504 * flush all such queued IO requests. */
505 if (CHNL_IS_OUTPUT(chnl_mode
)
506 && (pchnl
->chnl_type
== CHNL_PCPY
)) {
507 /* Wait for IO completions, up to the specified
509 while (!LST_IS_EMPTY(pchnl
->pio_requests
) &&
510 DSP_SUCCEEDED(status
)) {
511 status
= bridge_chnl_get_ioc(chnl_obj
,
512 timeout
, &chnl_ioc_obj
);
513 if (DSP_FAILED(status
))
516 if (chnl_ioc_obj
.status
& CHNL_IOCSTATTIMEOUT
)
521 status
= bridge_chnl_cancel_io(chnl_obj
);
522 /* Now, leave the channel in the ready state: */
523 pchnl
->dw_state
&= ~CHNL_STATECANCEL
;
526 DBC_ENSURE(DSP_FAILED(status
) || LST_IS_EMPTY(pchnl
->pio_requests
));
531 * ======== bridge_chnl_get_info ========
533 * Retrieve information related to a channel.
535 int bridge_chnl_get_info(struct chnl_object
*chnl_obj
,
536 OUT
struct chnl_info
*channel_info
)
539 struct chnl_object
*pchnl
= (struct chnl_object
*)chnl_obj
;
540 if (channel_info
!= NULL
) {
542 /* Return the requested information: */
543 channel_info
->hchnl_mgr
= pchnl
->chnl_mgr_obj
;
544 channel_info
->event_obj
= pchnl
->user_event
;
545 channel_info
->cnhl_id
= pchnl
->chnl_id
;
546 channel_info
->dw_mode
= pchnl
->chnl_mode
;
547 channel_info
->bytes_tx
= pchnl
->bytes_moved
;
548 channel_info
->process
= pchnl
->process
;
549 channel_info
->sync_event
= pchnl
->sync_event
;
550 channel_info
->cio_cs
= pchnl
->cio_cs
;
551 channel_info
->cio_reqs
= pchnl
->cio_reqs
;
552 channel_info
->dw_state
= pchnl
->dw_state
;
563 * ======== bridge_chnl_get_ioc ========
564 * Optionally wait for I/O completion on a channel. Dequeue an I/O
565 * completion record, which contains information about the completed
567 * Note: Ensures Channel Invariant (see notes above).
569 int bridge_chnl_get_ioc(struct chnl_object
*chnl_obj
, u32 timeout
,
570 OUT
struct chnl_ioc
*chan_ioc
)
573 struct chnl_object
*pchnl
= (struct chnl_object
*)chnl_obj
;
574 struct chnl_irp
*chnl_packet_obj
;
576 bool dequeue_ioc
= true;
577 struct chnl_ioc ioc
= { NULL
, 0, 0, 0, 0 };
578 u8
*host_sys_buf
= NULL
;
579 struct bridge_dev_context
*dev_ctxt
;
580 struct dev_object
*dev_obj
;
583 if (!chan_ioc
|| !pchnl
) {
585 } else if (timeout
== CHNL_IOCNOWAIT
) {
586 if (LST_IS_EMPTY(pchnl
->pio_completions
))
591 dev_obj
= dev_get_first();
592 dev_get_bridge_context(dev_obj
, &dev_ctxt
);
596 if (DSP_FAILED(status
))
599 ioc
.status
= CHNL_IOCSTATCOMPLETE
;
601 CHNL_IOCNOWAIT
&& LST_IS_EMPTY(pchnl
->pio_completions
)) {
602 if (timeout
== CHNL_IOCINFINITE
)
603 timeout
= SYNC_INFINITE
;
605 stat_sync
= sync_wait_on_event(pchnl
->sync_event
, timeout
);
606 if (stat_sync
== -ETIME
) {
607 /* No response from DSP */
608 ioc
.status
|= CHNL_IOCSTATTIMEOUT
;
610 } else if (stat_sync
== -EPERM
) {
611 /* This can occur when the user mode thread is
612 * aborted (^C), or when _VWIN32_WaitSingleObject()
613 * fails due to unkown causes. */
614 /* Even though Wait failed, there may be something in
616 if (LST_IS_EMPTY(pchnl
->pio_completions
)) {
617 ioc
.status
|= CHNL_IOCSTATCANCEL
;
622 /* See comment in AddIOReq */
623 spin_lock_bh(&pchnl
->chnl_mgr_obj
->chnl_mgr_lock
);
624 omap_mbox_disable_irq(dev_ctxt
->mbox
, IRQ_RX
);
626 /* Dequeue IOC and set chan_ioc; */
627 DBC_ASSERT(!LST_IS_EMPTY(pchnl
->pio_completions
));
629 (struct chnl_irp
*)lst_get_head(pchnl
->pio_completions
);
630 /* Update chan_ioc from channel state and chirp: */
631 if (chnl_packet_obj
) {
633 /* If this is a zero-copy channel, then set IOC's pbuf
634 * to the DSP's address. This DSP address will get
635 * translated to user's virtual addr later. */
637 host_sys_buf
= chnl_packet_obj
->host_sys_buf
;
638 ioc
.pbuf
= chnl_packet_obj
->host_user_buf
;
640 ioc
.byte_size
= chnl_packet_obj
->byte_size
;
641 ioc
.buf_size
= chnl_packet_obj
->buf_size
;
642 ioc
.dw_arg
= chnl_packet_obj
->dw_arg
;
643 ioc
.status
|= chnl_packet_obj
->status
;
644 /* Place the used chirp on the free list: */
645 lst_put_tail(pchnl
->free_packets_list
,
646 (struct list_head
*)chnl_packet_obj
);
657 /* Ensure invariant: If any IOC's are queued for this channel... */
658 if (!LST_IS_EMPTY(pchnl
->pio_completions
)) {
659 /* Since DSPStream_Reclaim() does not take a timeout
660 * parameter, we pass the stream's timeout value to
661 * bridge_chnl_get_ioc. We cannot determine whether or not
662 * we have waited in User mode. Since the stream's timeout
663 * value may be non-zero, we still have to set the event.
664 * Therefore, this optimization is taken out.
666 * if (timeout == CHNL_IOCNOWAIT) {
667 * ... ensure event is set..
668 * sync_set_event(pchnl->sync_event);
670 sync_set_event(pchnl
->sync_event
);
672 /* else, if list is empty, ensure event is reset. */
673 sync_reset_event(pchnl
->sync_event
);
675 omap_mbox_enable_irq(dev_ctxt
->mbox
, IRQ_RX
);
676 spin_unlock_bh(&pchnl
->chnl_mgr_obj
->chnl_mgr_lock
);
678 && (pchnl
->chnl_type
== CHNL_PCPY
&& pchnl
->chnl_id
> 1)) {
679 if (!(ioc
.pbuf
< (void *)USERMODE_ADDR
))
682 /* If the addr is in user mode, then copy it */
683 if (!host_sys_buf
|| !ioc
.pbuf
) {
687 if (!CHNL_IS_INPUT(pchnl
->chnl_mode
))
691 status
= copy_to_user(ioc
.pbuf
, host_sys_buf
, ioc
.byte_size
);
693 if (current
->flags
& PF_EXITING
)
702 /* Update User's IOC block: */
709 * ======== bridge_chnl_get_mgr_info ========
710 * Retrieve information related to the channel manager.
712 int bridge_chnl_get_mgr_info(struct chnl_mgr
*hchnl_mgr
, u32 ch_id
,
713 OUT
struct chnl_mgrinfo
*mgr_info
)
716 struct chnl_mgr
*chnl_mgr_obj
= (struct chnl_mgr
*)hchnl_mgr
;
718 if (mgr_info
!= NULL
) {
719 if (ch_id
<= CHNL_MAXCHANNELS
) {
721 /* Return the requested information: */
723 chnl_mgr_obj
->ap_channel
[ch_id
];
724 mgr_info
->open_channels
=
725 chnl_mgr_obj
->open_channels
;
726 mgr_info
->dw_type
= chnl_mgr_obj
->dw_type
;
727 /* total # of chnls */
728 mgr_info
->max_channels
=
729 chnl_mgr_obj
->max_channels
;
744 * ======== bridge_chnl_idle ========
745 * Idles a particular channel.
747 int bridge_chnl_idle(struct chnl_object
*chnl_obj
, u32 timeout
,
751 struct chnl_mgr
*chnl_mgr_obj
;
754 DBC_REQUIRE(chnl_obj
);
756 chnl_mode
= chnl_obj
->chnl_mode
;
757 chnl_mgr_obj
= chnl_obj
->chnl_mgr_obj
;
759 if (CHNL_IS_OUTPUT(chnl_mode
) && !flush_data
) {
760 /* Wait for IO completions, up to the specified timeout: */
761 status
= bridge_chnl_flush_io(chnl_obj
, timeout
);
763 status
= bridge_chnl_cancel_io(chnl_obj
);
765 /* Reset the byte count and put channel back in ready state. */
766 chnl_obj
->bytes_moved
= 0;
767 chnl_obj
->dw_state
&= ~CHNL_STATECANCEL
;
774 * ======== bridge_chnl_open ========
775 * Open a new half-duplex channel to the DSP board.
777 int bridge_chnl_open(OUT
struct chnl_object
**chnl
,
778 struct chnl_mgr
*hchnl_mgr
, s8 chnl_mode
,
779 u32 ch_id
, CONST IN
struct chnl_attr
*pattrs
)
782 struct chnl_mgr
*chnl_mgr_obj
= hchnl_mgr
;
783 struct chnl_object
*pchnl
= NULL
;
784 struct sync_object
*sync_event
= NULL
;
785 /* Ensure DBC requirements: */
786 DBC_REQUIRE(chnl
!= NULL
);
787 DBC_REQUIRE(pattrs
!= NULL
);
788 DBC_REQUIRE(hchnl_mgr
!= NULL
);
791 if (pattrs
->uio_reqs
== 0) {
797 if (ch_id
!= CHNL_PICKFREE
) {
798 if (ch_id
>= chnl_mgr_obj
->max_channels
)
800 else if (chnl_mgr_obj
->ap_channel
[ch_id
] !=
804 /* Check for free channel */
806 search_free_channel(chnl_mgr_obj
, &ch_id
);
810 if (DSP_FAILED(status
))
813 DBC_ASSERT(ch_id
< chnl_mgr_obj
->max_channels
);
814 /* Create channel object: */
815 pchnl
= kzalloc(sizeof(struct chnl_object
), GFP_KERNEL
);
820 /* Protect queues from io_dpc: */
821 pchnl
->dw_state
= CHNL_STATECANCEL
;
822 /* Allocate initial IOR and IOC queues: */
823 pchnl
->free_packets_list
= create_chirp_list(pattrs
->uio_reqs
);
824 pchnl
->pio_requests
= create_chirp_list(0);
825 pchnl
->pio_completions
= create_chirp_list(0);
826 pchnl
->chnl_packets
= pattrs
->uio_reqs
;
829 sync_event
= kzalloc(sizeof(struct sync_object
), GFP_KERNEL
);
831 sync_init_event(sync_event
);
835 if (DSP_SUCCEEDED(status
)) {
836 pchnl
->ntfy_obj
= kmalloc(sizeof(struct ntfy_object
),
839 ntfy_init(pchnl
->ntfy_obj
);
844 if (DSP_SUCCEEDED(status
)) {
845 if (pchnl
->pio_completions
&& pchnl
->pio_requests
&&
846 pchnl
->free_packets_list
) {
847 /* Initialize CHNL object fields: */
848 pchnl
->chnl_mgr_obj
= chnl_mgr_obj
;
849 pchnl
->chnl_id
= ch_id
;
850 pchnl
->chnl_mode
= chnl_mode
;
851 pchnl
->user_event
= sync_event
;
852 pchnl
->sync_event
= sync_event
;
853 /* Get the process handle */
854 pchnl
->process
= current
->tgid
;
856 pchnl
->bytes_moved
= 0;
857 /* Default to proc-copy */
858 pchnl
->chnl_type
= CHNL_PCPY
;
864 if (DSP_FAILED(status
)) {
866 if (pchnl
->pio_completions
) {
867 free_chirp_list(pchnl
->pio_completions
);
868 pchnl
->pio_completions
= NULL
;
871 if (pchnl
->pio_requests
) {
872 free_chirp_list(pchnl
->pio_requests
);
873 pchnl
->pio_requests
= NULL
;
875 if (pchnl
->free_packets_list
) {
876 free_chirp_list(pchnl
->free_packets_list
);
877 pchnl
->free_packets_list
= NULL
;
882 if (pchnl
->ntfy_obj
) {
883 ntfy_delete(pchnl
->ntfy_obj
);
884 kfree(pchnl
->ntfy_obj
);
885 pchnl
->ntfy_obj
= NULL
;
889 /* Insert channel object in channel manager: */
890 chnl_mgr_obj
->ap_channel
[pchnl
->chnl_id
] = pchnl
;
891 spin_lock_bh(&chnl_mgr_obj
->chnl_mgr_lock
);
892 chnl_mgr_obj
->open_channels
++;
893 spin_unlock_bh(&chnl_mgr_obj
->chnl_mgr_lock
);
894 /* Return result... */
895 pchnl
->dw_state
= CHNL_STATEREADY
;
899 DBC_ENSURE((DSP_SUCCEEDED(status
) && pchnl
) || (*chnl
== NULL
));
904 * ======== bridge_chnl_register_notify ========
905 * Registers for events on a particular channel.
907 int bridge_chnl_register_notify(struct chnl_object
*chnl_obj
,
908 u32 event_mask
, u32 notify_type
,
909 struct dsp_notification
*hnotification
)
913 DBC_ASSERT(!(event_mask
& ~(DSP_STREAMDONE
| DSP_STREAMIOCOMPLETION
)));
916 status
= ntfy_register(chnl_obj
->ntfy_obj
, hnotification
,
917 event_mask
, notify_type
);
919 status
= ntfy_unregister(chnl_obj
->ntfy_obj
, hnotification
);
925 * ======== create_chirp_list ========
927 * Initialize a queue of channel I/O Request/Completion packets.
929 * chirps: Number of Chirps to allocate.
931 * Pointer to queue of IRPs, or NULL.
935 static struct lst_list
*create_chirp_list(u32 chirps
)
937 struct lst_list
*chirp_list
;
938 struct chnl_irp
*chnl_packet_obj
;
941 chirp_list
= kzalloc(sizeof(struct lst_list
), GFP_KERNEL
);
944 INIT_LIST_HEAD(&chirp_list
->head
);
945 /* Make N chirps and place on queue. */
946 for (i
= 0; (i
< chirps
)
947 && ((chnl_packet_obj
= make_new_chirp()) != NULL
); i
++) {
948 lst_put_tail(chirp_list
,
949 (struct list_head
*)chnl_packet_obj
);
952 /* If we couldn't allocate all chirps, free those allocated: */
954 free_chirp_list(chirp_list
);
963 * ======== free_chirp_list ========
965 * Free the queue of Chirps.
967 static void free_chirp_list(struct lst_list
*chirp_list
)
969 DBC_REQUIRE(chirp_list
!= NULL
);
971 while (!LST_IS_EMPTY(chirp_list
))
972 kfree(lst_get_head(chirp_list
));
978 * ======== make_new_chirp ========
979 * Allocate the memory for a new channel IRP.
981 static struct chnl_irp
*make_new_chirp(void)
983 struct chnl_irp
*chnl_packet_obj
;
985 chnl_packet_obj
= kzalloc(sizeof(struct chnl_irp
), GFP_KERNEL
);
986 if (chnl_packet_obj
!= NULL
) {
987 /* lst_init_elem only resets the list's member values. */
988 lst_init_elem(&chnl_packet_obj
->link
);
991 return chnl_packet_obj
;
995 * ======== search_free_channel ========
996 * Search for a free channel slot in the array of channel pointers.
998 static int search_free_channel(struct chnl_mgr
*chnl_mgr_obj
,
1001 int status
= -ENOSR
;
1004 DBC_REQUIRE(chnl_mgr_obj
);
1006 for (i
= 0; i
< chnl_mgr_obj
->max_channels
; i
++) {
1007 if (chnl_mgr_obj
->ap_channel
[i
] == NULL
) {