1 /* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/timer.h>
9 #include <linux/sched.h>
10 #include <linux/netdevice.h>
11 #include <linux/errno.h>
13 #include "ozprotocol.h"
21 #include <asm/unaligned.h>
22 #include <linux/uaccess.h>
23 #include <net/psnap.h>
24 /*------------------------------------------------------------------------------
26 #define OZ_MAX_TX_POOL_SIZE 6
27 /*------------------------------------------------------------------------------
/* Forward declarations of file-local helpers. */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
/* Default (no-op) application handlers used to fill g_app_if slots. */
static int oz_def_app_init(void);
static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
43 /*------------------------------------------------------------------------------
44 * Counts the uncompleted isoc frames submitted to netcard.
46 static atomic_t g_submitted_isoc
= ATOMIC_INIT(0);
47 /* Application handler functions.
49 static struct oz_app_if g_app_if
[OZ_APPID_MAX
] = {
86 /*------------------------------------------------------------------------------
/* Default application init handler: nothing to set up, report success. */
static int oz_def_app_init(void)
{
	return 0;
}
93 /*------------------------------------------------------------------------------
/* Default application termination handler: intentionally a no-op. */
static void oz_def_app_term(void)
{
}
99 /*------------------------------------------------------------------------------
/* Default application start handler: accept the start, do nothing. */
static int oz_def_app_start(struct oz_pd *pd, int resume)
{
	return 0;
}
106 /*------------------------------------------------------------------------------
/* Default application stop handler: intentionally a no-op. */
static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}
112 /*------------------------------------------------------------------------------
/* Default receive handler: discard elements for apps with no handler. */
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}
118 /*------------------------------------------------------------------------------
119 * Context: softirq or process
121 void oz_pd_set_state(struct oz_pd
*pd
, unsigned state
)
124 oz_event_log(OZ_EVT_PD_STATE
, 0, 0, 0, state
);
128 oz_trace("PD State: OZ_PD_S_IDLE\n");
130 case OZ_PD_S_CONNECTED
:
131 oz_trace("PD State: OZ_PD_S_CONNECTED\n");
133 case OZ_PD_S_STOPPED
:
134 oz_trace("PD State: OZ_PD_S_STOPPED\n");
137 oz_trace("PD State: OZ_PD_S_SLEEP\n");
140 #endif /* WANT_TRACE */
142 /*------------------------------------------------------------------------------
143 * Context: softirq or process
145 void oz_pd_get(struct oz_pd
*pd
)
147 atomic_inc(&pd
->ref_count
);
149 /*------------------------------------------------------------------------------
150 * Context: softirq or process
152 void oz_pd_put(struct oz_pd
*pd
)
154 if (atomic_dec_and_test(&pd
->ref_count
))
157 /*------------------------------------------------------------------------------
158 * Context: softirq-serialized
160 struct oz_pd
*oz_pd_alloc(u8
*mac_addr
)
162 struct oz_pd
*pd
= kzalloc(sizeof(struct oz_pd
), GFP_ATOMIC
);
165 atomic_set(&pd
->ref_count
, 2);
166 for (i
= 0; i
< OZ_APPID_MAX
; i
++)
167 spin_lock_init(&pd
->app_lock
[i
]);
168 pd
->last_rx_pkt_num
= 0xffffffff;
169 oz_pd_set_state(pd
, OZ_PD_S_IDLE
);
170 pd
->max_tx_size
= OZ_MAX_TX_SIZE
;
171 memcpy(pd
->mac_addr
, mac_addr
, ETH_ALEN
);
172 if (0 != oz_elt_buf_init(&pd
->elt_buff
)) {
176 spin_lock_init(&pd
->tx_frame_lock
);
177 INIT_LIST_HEAD(&pd
->tx_queue
);
178 INIT_LIST_HEAD(&pd
->farewell_list
);
179 pd
->last_sent_frame
= &pd
->tx_queue
;
180 spin_lock_init(&pd
->stream_lock
);
181 INIT_LIST_HEAD(&pd
->stream_list
);
185 /*------------------------------------------------------------------------------
186 * Context: softirq or process
188 void oz_pd_destroy(struct oz_pd
*pd
)
191 struct oz_tx_frame
*f
;
192 struct oz_isoc_stream
*st
;
193 struct oz_farewell
*fwell
;
194 oz_trace("Destroying PD\n");
195 /* Delete any streams.
197 e
= pd
->stream_list
.next
;
198 while (e
!= &pd
->stream_list
) {
199 st
= container_of(e
, struct oz_isoc_stream
, link
);
201 oz_isoc_stream_free(st
);
203 /* Free any queued tx frames.
205 e
= pd
->tx_queue
.next
;
206 while (e
!= &pd
->tx_queue
) {
207 f
= container_of(e
, struct oz_tx_frame
, link
);
211 oz_retire_frame(pd
, f
);
213 oz_elt_buf_term(&pd
->elt_buff
);
214 /* Free any farewells.
216 e
= pd
->farewell_list
.next
;
217 while (e
!= &pd
->farewell_list
) {
218 fwell
= container_of(e
, struct oz_farewell
, link
);
222 /* Deallocate all frames in tx pool.
224 while (pd
->tx_pool
) {
226 pd
->tx_pool
= e
->next
;
227 kfree(container_of(e
, struct oz_tx_frame
, link
));
230 dev_put(pd
->net_dev
);
233 /*------------------------------------------------------------------------------
234 * Context: softirq-serialized
236 int oz_services_start(struct oz_pd
*pd
, u16 apps
, int resume
)
238 struct oz_app_if
*ai
;
240 oz_trace("oz_services_start(0x%x) resume(%d)\n", apps
, resume
);
241 for (ai
= g_app_if
; ai
< &g_app_if
[OZ_APPID_MAX
]; ai
++) {
242 if (apps
& (1<<ai
->app_id
)) {
243 if (ai
->start(pd
, resume
)) {
245 oz_trace("Unabled to start service %d\n",
249 oz_polling_lock_bh();
250 pd
->total_apps
|= (1<<ai
->app_id
);
252 pd
->paused_apps
&= ~(1<<ai
->app_id
);
253 oz_polling_unlock_bh();
258 /*------------------------------------------------------------------------------
259 * Context: softirq or process
261 void oz_services_stop(struct oz_pd
*pd
, u16 apps
, int pause
)
263 struct oz_app_if
*ai
;
264 oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps
, pause
);
265 for (ai
= g_app_if
; ai
< &g_app_if
[OZ_APPID_MAX
]; ai
++) {
266 if (apps
& (1<<ai
->app_id
)) {
267 oz_polling_lock_bh();
269 pd
->paused_apps
|= (1<<ai
->app_id
);
271 pd
->total_apps
&= ~(1<<ai
->app_id
);
272 pd
->paused_apps
&= ~(1<<ai
->app_id
);
274 oz_polling_unlock_bh();
279 /*------------------------------------------------------------------------------
282 void oz_pd_heartbeat(struct oz_pd
*pd
, u16 apps
)
284 struct oz_app_if
*ai
;
286 for (ai
= g_app_if
; ai
< &g_app_if
[OZ_APPID_MAX
]; ai
++) {
287 if (ai
->heartbeat
&& (apps
& (1<<ai
->app_id
))) {
288 if (ai
->heartbeat(pd
))
293 oz_pd_request_heartbeat(pd
);
294 if (pd
->mode
& OZ_F_ISOC_ANYTIME
) {
296 while (count
-- && (oz_send_isoc_frame(pd
) >= 0))
300 /*------------------------------------------------------------------------------
301 * Context: softirq or process
303 void oz_pd_stop(struct oz_pd
*pd
)
306 oz_trace("oz_pd_stop() State = 0x%x\n", pd
->state
);
307 oz_pd_indicate_farewells(pd
);
308 oz_polling_lock_bh();
309 stop_apps
= pd
->total_apps
;
312 oz_polling_unlock_bh();
313 oz_services_stop(pd
, stop_apps
, 0);
314 oz_polling_lock_bh();
315 oz_pd_set_state(pd
, OZ_PD_S_STOPPED
);
316 /* Remove from PD list.*/
318 oz_polling_unlock_bh();
319 oz_trace("pd ref count = %d\n", atomic_read(&pd
->ref_count
));
320 oz_timer_delete(pd
, 0);
323 /*------------------------------------------------------------------------------
326 int oz_pd_sleep(struct oz_pd
*pd
)
330 oz_polling_lock_bh();
331 if (pd
->state
& (OZ_PD_S_SLEEP
| OZ_PD_S_STOPPED
)) {
332 oz_polling_unlock_bh();
335 if (pd
->keep_alive_j
&& pd
->session_id
) {
336 oz_pd_set_state(pd
, OZ_PD_S_SLEEP
);
337 pd
->pulse_time_j
= jiffies
+ pd
->keep_alive_j
;
338 oz_trace("Sleep Now %lu until %lu\n",
339 jiffies
, pd
->pulse_time_j
);
343 stop_apps
= pd
->total_apps
;
344 oz_polling_unlock_bh();
348 oz_services_stop(pd
, stop_apps
, 1);
349 oz_timer_add(pd
, OZ_TIMER_STOP
, jiffies
+ pd
->keep_alive_j
, 1);
353 /*------------------------------------------------------------------------------
356 static struct oz_tx_frame
*oz_tx_frame_alloc(struct oz_pd
*pd
)
358 struct oz_tx_frame
*f
= 0;
359 spin_lock_bh(&pd
->tx_frame_lock
);
361 f
= container_of(pd
->tx_pool
, struct oz_tx_frame
, link
);
362 pd
->tx_pool
= pd
->tx_pool
->next
;
365 spin_unlock_bh(&pd
->tx_frame_lock
);
367 f
= kmalloc(sizeof(struct oz_tx_frame
), GFP_ATOMIC
);
369 f
->total_size
= sizeof(struct oz_hdr
);
370 INIT_LIST_HEAD(&f
->link
);
371 INIT_LIST_HEAD(&f
->elt_list
);
375 /*------------------------------------------------------------------------------
376 * Context: softirq or process
378 static void oz_tx_isoc_free(struct oz_pd
*pd
, struct oz_tx_frame
*f
)
380 pd
->nb_queued_isoc_frames
--;
381 list_del_init(&f
->link
);
382 if (pd
->tx_pool_count
< OZ_MAX_TX_POOL_SIZE
) {
383 f
->link
.next
= pd
->tx_pool
;
384 pd
->tx_pool
= &f
->link
;
389 oz_trace2(OZ_TRACE_TX_FRAMES
, "Releasing ISOC Frame isoc_nb= %d\n",
390 pd
->nb_queued_isoc_frames
);
392 /*------------------------------------------------------------------------------
393 * Context: softirq or process
395 static void oz_tx_frame_free(struct oz_pd
*pd
, struct oz_tx_frame
*f
)
397 spin_lock_bh(&pd
->tx_frame_lock
);
398 if (pd
->tx_pool_count
< OZ_MAX_TX_POOL_SIZE
) {
399 f
->link
.next
= pd
->tx_pool
;
400 pd
->tx_pool
= &f
->link
;
404 spin_unlock_bh(&pd
->tx_frame_lock
);
408 /*------------------------------------------------------------------------------
409 * Context: softirq-serialized
411 void oz_set_more_bit(struct sk_buff
*skb
)
413 struct oz_hdr
*oz_hdr
= (struct oz_hdr
*)skb_network_header(skb
);
414 oz_hdr
->control
|= OZ_F_MORE_DATA
;
416 /*------------------------------------------------------------------------------
417 * Context: softirq-serialized
419 void oz_set_last_pkt_nb(struct oz_pd
*pd
, struct sk_buff
*skb
)
421 struct oz_hdr
*oz_hdr
= (struct oz_hdr
*)skb_network_header(skb
);
422 oz_hdr
->last_pkt_num
= pd
->trigger_pkt_num
& OZ_LAST_PN_MASK
;
424 /*------------------------------------------------------------------------------
427 int oz_prepare_frame(struct oz_pd
*pd
, int empty
)
429 struct oz_tx_frame
*f
;
430 if ((pd
->mode
& OZ_MODE_MASK
) != OZ_MODE_TRIGGERED
)
432 if (pd
->nb_queued_frames
>= OZ_MAX_QUEUED_FRAMES
)
434 if (!empty
&& !oz_are_elts_available(&pd
->elt_buff
))
436 f
= oz_tx_frame_alloc(pd
);
441 (OZ_PROTOCOL_VERSION
<<OZ_VERSION_SHIFT
) | OZ_F_ACK_REQUESTED
;
442 ++pd
->last_tx_pkt_num
;
443 put_unaligned(cpu_to_le32(pd
->last_tx_pkt_num
), &f
->hdr
.pkt_num
);
445 oz_select_elts_for_tx(&pd
->elt_buff
, 0, &f
->total_size
,
446 pd
->max_tx_size
, &f
->elt_list
);
448 spin_lock(&pd
->tx_frame_lock
);
449 list_add_tail(&f
->link
, &pd
->tx_queue
);
450 pd
->nb_queued_frames
++;
451 spin_unlock(&pd
->tx_frame_lock
);
454 /*------------------------------------------------------------------------------
455 * Context: softirq-serialized
457 static struct sk_buff
*oz_build_frame(struct oz_pd
*pd
, struct oz_tx_frame
*f
)
459 struct sk_buff
*skb
= 0;
460 struct net_device
*dev
= pd
->net_dev
;
461 struct oz_hdr
*oz_hdr
;
464 /* Allocate skb with enough space for the lower layers as well
465 * as the space we need.
467 skb
= alloc_skb(f
->total_size
+ OZ_ALLOCATED_SPACE(dev
), GFP_ATOMIC
);
470 /* Reserve the head room for lower layers.
472 skb_reserve(skb
, LL_RESERVED_SPACE(dev
));
473 skb_reset_network_header(skb
);
475 skb
->protocol
= htons(OZ_ETHERTYPE
);
476 if (dev_hard_header(skb
, dev
, OZ_ETHERTYPE
, pd
->mac_addr
,
477 dev
->dev_addr
, skb
->len
) < 0)
479 /* Push the tail to the end of the area we are going to copy to.
481 oz_hdr
= (struct oz_hdr
*)skb_put(skb
, f
->total_size
);
482 f
->hdr
.last_pkt_num
= pd
->trigger_pkt_num
& OZ_LAST_PN_MASK
;
483 memcpy(oz_hdr
, &f
->hdr
, sizeof(struct oz_hdr
));
484 /* Copy the elements into the frame body.
486 elt
= (struct oz_elt
*)(oz_hdr
+1);
487 for (e
= f
->elt_list
.next
; e
!= &f
->elt_list
; e
= e
->next
) {
488 struct oz_elt_info
*ei
;
489 ei
= container_of(e
, struct oz_elt_info
, link
);
490 memcpy(elt
, ei
->data
, ei
->length
);
491 elt
= oz_next_elt(elt
);
498 /*------------------------------------------------------------------------------
499 * Context: softirq or process
501 static void oz_retire_frame(struct oz_pd
*pd
, struct oz_tx_frame
*f
)
504 struct oz_elt_info
*ei
;
505 e
= f
->elt_list
.next
;
506 while (e
!= &f
->elt_list
) {
507 ei
= container_of(e
, struct oz_elt_info
, link
);
509 list_del_init(&ei
->link
);
511 ei
->callback(pd
, ei
->context
);
512 spin_lock_bh(&pd
->elt_buff
.lock
);
513 oz_elt_info_free(&pd
->elt_buff
, ei
);
514 spin_unlock_bh(&pd
->elt_buff
.lock
);
516 oz_tx_frame_free(pd
, f
);
517 if (pd
->elt_buff
.free_elts
> pd
->elt_buff
.max_free_elts
)
518 oz_trim_elt_pool(&pd
->elt_buff
);
520 /*------------------------------------------------------------------------------
521 * Context: softirq-serialized
523 static int oz_send_next_queued_frame(struct oz_pd
*pd
, int more_data
)
526 struct oz_tx_frame
*f
;
528 spin_lock(&pd
->tx_frame_lock
);
529 e
= pd
->last_sent_frame
->next
;
530 if (e
== &pd
->tx_queue
) {
531 spin_unlock(&pd
->tx_frame_lock
);
534 f
= container_of(e
, struct oz_tx_frame
, link
);
536 if (f
->skb
!= NULL
) {
538 oz_tx_isoc_free(pd
, f
);
539 spin_unlock(&pd
->tx_frame_lock
);
541 oz_set_more_bit(skb
);
542 oz_set_last_pkt_nb(pd
, skb
);
543 if ((int)atomic_read(&g_submitted_isoc
) <
544 OZ_MAX_SUBMITTED_ISOC
) {
545 if (dev_queue_xmit(skb
) < 0) {
546 oz_trace2(OZ_TRACE_TX_FRAMES
,
547 "Dropping ISOC Frame\n");
548 oz_event_log(OZ_EVT_TX_ISOC_DROP
, 0, 0, 0, 0);
551 atomic_inc(&g_submitted_isoc
);
552 oz_trace2(OZ_TRACE_TX_FRAMES
,
553 "Sending ISOC Frame, nb_isoc= %d\n",
554 pd
->nb_queued_isoc_frames
);
558 oz_trace2(OZ_TRACE_TX_FRAMES
, "Dropping ISOC Frame>\n");
559 oz_event_log(OZ_EVT_TX_ISOC_DROP
, 0, 0, 0, 0);
564 pd
->last_sent_frame
= e
;
565 skb
= oz_build_frame(pd
, f
);
566 spin_unlock(&pd
->tx_frame_lock
);
568 oz_set_more_bit(skb
);
569 oz_trace2(OZ_TRACE_TX_FRAMES
, "TX frame PN=0x%x\n", f
->hdr
.pkt_num
);
571 oz_event_log(OZ_EVT_TX_FRAME
,
573 (((u16
)f
->hdr
.control
)<<8)|f
->hdr
.last_pkt_num
,
575 if (dev_queue_xmit(skb
) < 0)
581 /*------------------------------------------------------------------------------
582 * Context: softirq-serialized
584 void oz_send_queued_frames(struct oz_pd
*pd
, int backlog
)
586 while (oz_prepare_frame(pd
, 0) >= 0)
589 switch (pd
->mode
& (OZ_F_ISOC_NO_ELTS
| OZ_F_ISOC_ANYTIME
)) {
591 case OZ_F_ISOC_NO_ELTS
: {
592 backlog
+= pd
->nb_queued_isoc_frames
;
595 if (backlog
> OZ_MAX_SUBMITTED_ISOC
)
596 backlog
= OZ_MAX_SUBMITTED_ISOC
;
599 case OZ_NO_ELTS_ANYTIME
: {
600 if ((backlog
<= 0) && (pd
->isoc_sent
== 0))
611 if (oz_send_next_queued_frame(pd
, backlog
) < 0)
616 out
: oz_prepare_frame(pd
, 1);
617 oz_send_next_queued_frame(pd
, 0);
619 /*------------------------------------------------------------------------------
622 static int oz_send_isoc_frame(struct oz_pd
*pd
)
624 struct sk_buff
*skb
= 0;
625 struct net_device
*dev
= pd
->net_dev
;
626 struct oz_hdr
*oz_hdr
;
629 struct list_head list
;
630 int total_size
= sizeof(struct oz_hdr
);
631 INIT_LIST_HEAD(&list
);
633 oz_select_elts_for_tx(&pd
->elt_buff
, 1, &total_size
,
634 pd
->max_tx_size
, &list
);
635 if (list
.next
== &list
)
637 skb
= alloc_skb(total_size
+ OZ_ALLOCATED_SPACE(dev
), GFP_ATOMIC
);
639 oz_trace("Cannot alloc skb\n");
640 oz_elt_info_free_chain(&pd
->elt_buff
, &list
);
643 skb_reserve(skb
, LL_RESERVED_SPACE(dev
));
644 skb_reset_network_header(skb
);
646 skb
->protocol
= htons(OZ_ETHERTYPE
);
647 if (dev_hard_header(skb
, dev
, OZ_ETHERTYPE
, pd
->mac_addr
,
648 dev
->dev_addr
, skb
->len
) < 0) {
652 oz_hdr
= (struct oz_hdr
*)skb_put(skb
, total_size
);
653 oz_hdr
->control
= (OZ_PROTOCOL_VERSION
<<OZ_VERSION_SHIFT
) | OZ_F_ISOC
;
654 oz_hdr
->last_pkt_num
= pd
->trigger_pkt_num
& OZ_LAST_PN_MASK
;
655 elt
= (struct oz_elt
*)(oz_hdr
+1);
657 for (e
= list
.next
; e
!= &list
; e
= e
->next
) {
658 struct oz_elt_info
*ei
;
659 ei
= container_of(e
, struct oz_elt_info
, link
);
660 memcpy(elt
, ei
->data
, ei
->length
);
661 elt
= oz_next_elt(elt
);
663 oz_event_log(OZ_EVT_TX_ISOC
, 0, 0, 0, 0);
665 oz_elt_info_free_chain(&pd
->elt_buff
, &list
);
668 /*------------------------------------------------------------------------------
669 * Context: softirq-serialized
671 void oz_retire_tx_frames(struct oz_pd
*pd
, u8 lpn
)
674 struct oz_tx_frame
*f
;
675 struct list_head
*first
= 0;
676 struct list_head
*last
= 0;
680 spin_lock(&pd
->tx_frame_lock
);
681 e
= pd
->tx_queue
.next
;
682 while (e
!= &pd
->tx_queue
) {
683 f
= container_of(e
, struct oz_tx_frame
, link
);
684 pkt_num
= le32_to_cpu(get_unaligned(&f
->hdr
.pkt_num
));
685 diff
= (lpn
- (pkt_num
& OZ_LAST_PN_MASK
)) & OZ_LAST_PN_MASK
;
686 if ((diff
> OZ_LAST_PN_HALF_CYCLE
) || (pkt_num
== 0))
688 oz_trace2(OZ_TRACE_TX_FRAMES
, "Releasing pkt_num= %u, nb= %d\n",
689 pkt_num
, pd
->nb_queued_frames
);
694 pd
->nb_queued_frames
--;
697 last
->next
->prev
= &pd
->tx_queue
;
698 pd
->tx_queue
.next
= last
->next
;
701 pd
->last_sent_frame
= &pd
->tx_queue
;
702 spin_unlock(&pd
->tx_frame_lock
);
704 f
= container_of(first
, struct oz_tx_frame
, link
);
706 oz_retire_frame(pd
, f
);
709 /*------------------------------------------------------------------------------
710 * Precondition: stream_lock must be held.
713 static struct oz_isoc_stream
*pd_stream_find(struct oz_pd
*pd
, u8 ep_num
)
716 struct oz_isoc_stream
*st
;
717 list_for_each(e
, &pd
->stream_list
) {
718 st
= container_of(e
, struct oz_isoc_stream
, link
);
719 if (st
->ep_num
== ep_num
)
724 /*------------------------------------------------------------------------------
727 int oz_isoc_stream_create(struct oz_pd
*pd
, u8 ep_num
)
729 struct oz_isoc_stream
*st
=
730 kzalloc(sizeof(struct oz_isoc_stream
), GFP_ATOMIC
);
734 spin_lock_bh(&pd
->stream_lock
);
735 if (!pd_stream_find(pd
, ep_num
)) {
736 list_add(&st
->link
, &pd
->stream_list
);
739 spin_unlock_bh(&pd
->stream_lock
);
744 /*------------------------------------------------------------------------------
745 * Context: softirq or process
747 static void oz_isoc_stream_free(struct oz_isoc_stream
*st
)
752 /*------------------------------------------------------------------------------
755 int oz_isoc_stream_delete(struct oz_pd
*pd
, u8 ep_num
)
757 struct oz_isoc_stream
*st
;
758 spin_lock_bh(&pd
->stream_lock
);
759 st
= pd_stream_find(pd
, ep_num
);
762 spin_unlock_bh(&pd
->stream_lock
);
764 oz_isoc_stream_free(st
);
767 /*------------------------------------------------------------------------------
770 static void oz_isoc_destructor(struct sk_buff
*skb
)
772 atomic_dec(&g_submitted_isoc
);
773 oz_event_log(OZ_EVT_TX_ISOC_DONE
, atomic_read(&g_submitted_isoc
),
776 /*------------------------------------------------------------------------------
779 int oz_send_isoc_unit(struct oz_pd
*pd
, u8 ep_num
, u8
*data
, int len
)
781 struct net_device
*dev
= pd
->net_dev
;
782 struct oz_isoc_stream
*st
;
784 struct sk_buff
*skb
= 0;
785 struct oz_hdr
*oz_hdr
= 0;
787 spin_lock_bh(&pd
->stream_lock
);
788 st
= pd_stream_find(pd
, ep_num
);
792 nb_units
= st
->nb_units
;
797 spin_unlock_bh(&pd
->stream_lock
);
801 /* Allocate enough space for max size frame. */
802 skb
= alloc_skb(pd
->max_tx_size
+ OZ_ALLOCATED_SPACE(dev
),
806 /* Reserve the head room for lower layers. */
807 skb_reserve(skb
, LL_RESERVED_SPACE(dev
));
808 skb_reset_network_header(skb
);
810 skb
->protocol
= htons(OZ_ETHERTYPE
);
811 /* For audio packet set priority to AC_VO */
813 size
= sizeof(struct oz_hdr
) + sizeof(struct oz_isoc_large
);
814 oz_hdr
= (struct oz_hdr
*)skb_put(skb
, size
);
816 memcpy(skb_put(skb
, len
), data
, len
);
818 if (++nb_units
< pd
->ms_per_isoc
) {
819 spin_lock_bh(&pd
->stream_lock
);
821 st
->nb_units
= nb_units
;
824 spin_unlock_bh(&pd
->stream_lock
);
827 struct oz_isoc_large iso
;
828 spin_lock_bh(&pd
->stream_lock
);
829 iso
.frame_number
= st
->frame_num
;
830 st
->frame_num
+= nb_units
;
831 spin_unlock_bh(&pd
->stream_lock
);
833 (OZ_PROTOCOL_VERSION
<<OZ_VERSION_SHIFT
) | OZ_F_ISOC
;
834 oz
.last_pkt_num
= pd
->trigger_pkt_num
& OZ_LAST_PN_MASK
;
836 iso
.endpoint
= ep_num
;
837 iso
.format
= OZ_DATA_F_ISOC_LARGE
;
838 iso
.ms_data
= nb_units
;
839 memcpy(oz_hdr
, &oz
, sizeof(oz
));
840 memcpy(oz_hdr
+1, &iso
, sizeof(iso
));
841 if (dev_hard_header(skb
, dev
, OZ_ETHERTYPE
, pd
->mac_addr
,
842 dev
->dev_addr
, skb
->len
) < 0)
845 skb
->destructor
= oz_isoc_destructor
;
846 /*Queue for Xmit if mode is not ANYTIME*/
847 if (!(pd
->mode
& OZ_F_ISOC_ANYTIME
)) {
848 struct oz_tx_frame
*isoc_unit
= NULL
;
849 int nb
= pd
->nb_queued_isoc_frames
;
850 if (nb
>= pd
->isoc_latency
) {
851 oz_trace2(OZ_TRACE_TX_FRAMES
,
852 "Dropping ISOC Unit nb= %d\n",
856 isoc_unit
= oz_tx_frame_alloc(pd
);
857 if (isoc_unit
== NULL
)
860 isoc_unit
->skb
= skb
;
861 spin_lock_bh(&pd
->tx_frame_lock
);
862 list_add_tail(&isoc_unit
->link
, &pd
->tx_queue
);
863 pd
->nb_queued_isoc_frames
++;
864 spin_unlock_bh(&pd
->tx_frame_lock
);
865 oz_trace2(OZ_TRACE_TX_FRAMES
,
866 "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
867 pd
->nb_queued_isoc_frames
, pd
->nb_queued_frames
);
868 oz_event_log(OZ_EVT_TX_ISOC
, nb_units
, iso
.frame_number
,
869 skb
, atomic_read(&g_submitted_isoc
));
873 /*In ANYTIME mode Xmit unit immediately*/
874 if (atomic_read(&g_submitted_isoc
) < OZ_MAX_SUBMITTED_ISOC
) {
875 atomic_inc(&g_submitted_isoc
);
876 oz_event_log(OZ_EVT_TX_ISOC
, nb_units
, iso
.frame_number
,
877 skb
, atomic_read(&g_submitted_isoc
));
878 if (dev_queue_xmit(skb
) < 0) {
879 oz_event_log(OZ_EVT_TX_ISOC_DROP
, 0, 0, 0, 0);
885 out
: oz_event_log(OZ_EVT_TX_ISOC_DROP
, 0, 0, 0, 0);
892 /*------------------------------------------------------------------------------
895 void oz_apps_init(void)
898 for (i
= 0; i
< OZ_APPID_MAX
; i
++)
899 if (g_app_if
[i
].init
)
902 /*------------------------------------------------------------------------------
905 void oz_apps_term(void)
908 /* Terminate all the apps. */
909 for (i
= 0; i
< OZ_APPID_MAX
; i
++)
910 if (g_app_if
[i
].term
)
913 /*------------------------------------------------------------------------------
914 * Context: softirq-serialized
916 void oz_handle_app_elt(struct oz_pd
*pd
, u8 app_id
, struct oz_elt
*elt
)
918 struct oz_app_if
*ai
;
919 if (app_id
== 0 || app_id
> OZ_APPID_MAX
)
921 ai
= &g_app_if
[app_id
-1];
924 /*------------------------------------------------------------------------------
925 * Context: softirq or process
927 void oz_pd_indicate_farewells(struct oz_pd
*pd
)
929 struct oz_farewell
*f
;
930 struct oz_app_if
*ai
= &g_app_if
[OZ_APPID_USB
-1];
932 oz_polling_lock_bh();
933 if (list_empty(&pd
->farewell_list
)) {
934 oz_polling_unlock_bh();
937 f
= list_first_entry(&pd
->farewell_list
,
938 struct oz_farewell
, link
);
940 oz_polling_unlock_bh();
942 ai
->farewell(pd
, f
->ep_num
, f
->report
, f
->len
);