2 * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
4 * Flow rings are transmit traffic (=propagating towards antenna) related entities
7 * Copyright (C) 1999-2016, Broadcom Corporation
9 * Unless you and Broadcom execute a separate written software license
10 * agreement governing use of this software, this software is licensed to you
11 * under the terms of the GNU General Public License version 2 (the "GPL"),
12 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
13 * following added to such license:
15 * As a special exception, the copyright holders of this software give you
16 * permission to link this software with independent modules, and to copy and
17 * distribute the resulting executable under terms of your choice, provided that
18 * you also meet, for each linked independent module, the terms and conditions of
19 * the license of that module. An independent module is a module which is not
20 * derived from this software. The special exception does not apply to any
21 * modifications of the software.
23 * Notwithstanding the above, under no circumstances may you combine this
24 * software in any way with any other Broadcom software provided under a license
25 * other than the GPL, without Broadcom's express prior written consent.
28 * <<Broadcom-WL-IPTag/Open:>>
30 * $Id: dhd_flowring.c 591285 2015-10-07 11:56:29Z $
36 #include <bcmendian.h>
39 #include <proto/ethernet.h>
40 #include <proto/bcmevent.h>
41 #include <dngl_stats.h>
45 #include <dhd_flowring.h>
47 #include <dhd_proto.h>
49 #include <proto/802.1d.h>
50 #include <pcie_core.h>
51 #include <bcmmsgbuf.h>
55 static INLINE
int dhd_flow_queue_throttle(flow_queue_t
*queue
);
57 static INLINE uint16
dhd_flowid_find(dhd_pub_t
*dhdp
, uint8 ifindex
,
58 uint8 prio
, char *sa
, char *da
);
60 static INLINE uint16
dhd_flowid_alloc(dhd_pub_t
*dhdp
, uint8 ifindex
,
61 uint8 prio
, char *sa
, char *da
);
63 static INLINE
int dhd_flowid_lookup(dhd_pub_t
*dhdp
, uint8 ifindex
,
64 uint8 prio
, char *sa
, char *da
, uint16
*flowid
);
65 int BCMFASTPATH
dhd_flow_queue_overflow(flow_queue_t
*queue
, void *pkt
);
/* Packets in a flow queue are singly linked through the OS packet's native
 * link field (PKTLINK/PKTSETLINK), so no auxiliary queue nodes are needed. */
#define FLOW_QUEUE_PKT_NEXT(p)          PKTLINK(p)
#define FLOW_QUEUE_PKT_SETNEXT(p, x)    PKTSETLINK((p), (x))
70 #ifdef DHD_LOSSLESS_ROAMING
71 const uint8 prio2ac
[8] = { 0, 1, 1, 0, 2, 2, 3, 7 };
73 const uint8 prio2ac
[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
75 const uint8 prio2tid
[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
77 /** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
79 dhd_flow_queue_throttle(flow_queue_t
*queue
)
81 return DHD_FLOW_QUEUE_FULL(queue
);
85 dhd_flow_queue_overflow(flow_queue_t
*queue
, void *pkt
)
87 return BCME_NORESOURCE
;
90 /** Returns flow ring given a flowid */
92 dhd_flow_ring_node(dhd_pub_t
*dhdp
, uint16 flowid
)
94 flow_ring_node_t
* flow_ring_node
;
96 ASSERT(dhdp
!= (dhd_pub_t
*)NULL
);
97 ASSERT(flowid
< dhdp
->num_flow_rings
);
99 flow_ring_node
= &(((flow_ring_node_t
*)(dhdp
->flow_ring_table
))[flowid
]);
101 ASSERT(flow_ring_node
->flowid
== flowid
);
102 return flow_ring_node
;
105 /** Returns 'backup' queue given a flowid */
107 dhd_flow_queue(dhd_pub_t
*dhdp
, uint16 flowid
)
109 flow_ring_node_t
* flow_ring_node
;
111 flow_ring_node
= dhd_flow_ring_node(dhdp
, flowid
);
112 return &flow_ring_node
->queue
;
115 /* Flow ring's queue management functions */
117 /** Initialize a flow ring's queue, called on driver initialization. */
119 dhd_flow_queue_init(dhd_pub_t
*dhdp
, flow_queue_t
*queue
, int max
)
121 ASSERT((queue
!= NULL
) && (max
> 0));
123 dll_init(&queue
->list
);
124 queue
->head
= queue
->tail
= NULL
;
127 /* Set queue's threshold and queue's parent cummulative length counter */
129 DHD_FLOW_QUEUE_SET_MAX(queue
, max
);
130 DHD_FLOW_QUEUE_SET_THRESHOLD(queue
, max
);
131 DHD_FLOW_QUEUE_SET_CLEN(queue
, &dhdp
->cumm_ctr
);
133 queue
->failures
= 0U;
134 queue
->cb
= &dhd_flow_queue_overflow
;
137 /** Register an enqueue overflow callback handler */
139 dhd_flow_queue_register(flow_queue_t
*queue
, flow_queue_cb_t cb
)
141 ASSERT(queue
!= NULL
);
146 * Enqueue an 802.3 packet at the back of a flow ring's queue. From there, it will travel later on
147 * to the flow ring itself.
150 dhd_flow_queue_enqueue(dhd_pub_t
*dhdp
, flow_queue_t
*queue
, void *pkt
)
154 ASSERT(queue
!= NULL
);
156 if (dhd_flow_queue_throttle(queue
)) {
158 ret
= (*queue
->cb
)(queue
, pkt
);
163 FLOW_QUEUE_PKT_SETNEXT(queue
->tail
, pkt
);
168 FLOW_QUEUE_PKT_SETNEXT(pkt
, NULL
);
170 queue
->tail
= pkt
; /* at tail */
173 /* increment parent's cummulative length */
174 DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue
));
180 /** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */
182 dhd_flow_queue_dequeue(dhd_pub_t
*dhdp
, flow_queue_t
*queue
)
186 ASSERT(queue
!= NULL
);
188 pkt
= queue
->head
; /* from head */
191 ASSERT((queue
->len
== 0) && (queue
->tail
== NULL
));
195 queue
->head
= FLOW_QUEUE_PKT_NEXT(pkt
);
196 if (queue
->head
== NULL
)
200 /* decrement parent's cummulative length */
201 DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue
));
203 FLOW_QUEUE_PKT_SETNEXT(pkt
, NULL
); /* dettach packet from queue */
209 /** Reinsert a dequeued 802.3 packet back at the head */
211 dhd_flow_queue_reinsert(dhd_pub_t
*dhdp
, flow_queue_t
*queue
, void *pkt
)
213 if (queue
->head
== NULL
) {
217 FLOW_QUEUE_PKT_SETNEXT(pkt
, queue
->head
);
220 /* increment parent's cummulative length */
221 DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue
));
224 /** Fetch the backup queue for a flowring, and assign flow control thresholds */
226 dhd_flow_ring_config_thresholds(dhd_pub_t
*dhdp
, uint16 flowid
,
227 int queue_budget
, int cumm_threshold
, void *cumm_ctr
)
229 flow_queue_t
* queue
;
231 ASSERT(dhdp
!= (dhd_pub_t
*)NULL
);
232 ASSERT(queue_budget
> 1);
233 ASSERT(cumm_threshold
> 1);
234 ASSERT(cumm_ctr
!= (void*)NULL
);
236 queue
= dhd_flow_queue(dhdp
, flowid
);
238 DHD_FLOW_QUEUE_SET_MAX(queue
, queue_budget
); /* Max queue length */
240 /* Set the queue's parent threshold and cummulative counter */
241 DHD_FLOW_QUEUE_SET_THRESHOLD(queue
, cumm_threshold
);
242 DHD_FLOW_QUEUE_SET_CLEN(queue
, cumm_ctr
);
245 /** Initializes data structures of multiple flow rings */
247 dhd_flow_rings_init(dhd_pub_t
*dhdp
, uint32 num_flow_rings
)
250 uint32 flow_ring_table_sz
;
251 uint32 if_flow_lkup_sz
= 0;
252 void * flowid_allocator
;
253 flow_ring_table_t
*flow_ring_table
= NULL
;
254 if_flow_lkup_t
*if_flow_lkup
= NULL
;
256 void *list_lock
= NULL
;
259 DHD_INFO(("%s\n", __FUNCTION__
));
261 /* Construct a 16bit flowid allocator */
262 flowid_allocator
= id16_map_init(dhdp
->osh
,
263 num_flow_rings
- FLOW_RING_COMMON
, FLOWID_RESERVED
);
264 if (flowid_allocator
== NULL
) {
265 DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__
));
269 /* Allocate a flow ring table, comprising of requested number of rings */
270 flow_ring_table_sz
= (num_flow_rings
* sizeof(flow_ring_node_t
));
271 flow_ring_table
= (flow_ring_table_t
*)MALLOCZ(dhdp
->osh
, flow_ring_table_sz
);
272 if (flow_ring_table
== NULL
) {
273 DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__
));
277 /* Initialize flow ring table state */
278 DHD_CUMM_CTR_INIT(&dhdp
->cumm_ctr
);
279 bzero((uchar
*)flow_ring_table
, flow_ring_table_sz
);
280 for (idx
= 0; idx
< num_flow_rings
; idx
++) {
281 flow_ring_table
[idx
].status
= FLOW_RING_STATUS_CLOSED
;
282 flow_ring_table
[idx
].flowid
= (uint16
)idx
;
283 flow_ring_table
[idx
].lock
= dhd_os_spin_lock_init(dhdp
->osh
);
284 if (flow_ring_table
[idx
].lock
== NULL
) {
285 DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__
));
289 dll_init(&flow_ring_table
[idx
].list
);
291 /* Initialize the per flow ring backup queue */
292 dhd_flow_queue_init(dhdp
, &flow_ring_table
[idx
].queue
,
293 FLOW_RING_QUEUE_THRESHOLD
);
296 /* Allocate per interface hash table (for fast lookup from interface to flow ring) */
297 if_flow_lkup_sz
= sizeof(if_flow_lkup_t
) * DHD_MAX_IFS
;
298 if_flow_lkup
= (if_flow_lkup_t
*)DHD_OS_PREALLOC(dhdp
,
299 DHD_PREALLOC_IF_FLOW_LKUP
, if_flow_lkup_sz
);
300 if (if_flow_lkup
== NULL
) {
301 DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__
));
305 /* Initialize per interface hash table */
306 for (idx
= 0; idx
< DHD_MAX_IFS
; idx
++) {
308 if_flow_lkup
[idx
].status
= 0;
309 if_flow_lkup
[idx
].role
= 0;
310 for (hash_ix
= 0; hash_ix
< DHD_FLOWRING_HASH_SIZE
; hash_ix
++)
311 if_flow_lkup
[idx
].fl_hash
[hash_ix
] = NULL
;
314 lock
= dhd_os_spin_lock_init(dhdp
->osh
);
318 list_lock
= dhd_os_spin_lock_init(dhdp
->osh
);
319 if (list_lock
== NULL
)
322 dhdp
->flow_prio_map_type
= DHD_FLOW_PRIO_AC_MAP
;
323 bcopy(prio2ac
, dhdp
->flow_prio_map
, sizeof(uint8
) * NUMPRIO
);
324 #ifdef DHD_LOSSLESS_ROAMING
325 dhdp
->dequeue_prec_map
= ALLPRIO
;
327 /* Now populate into dhd pub */
328 DHD_FLOWID_LOCK(lock
, flags
);
329 dhdp
->num_flow_rings
= num_flow_rings
;
330 dhdp
->flowid_allocator
= (void *)flowid_allocator
;
331 dhdp
->flow_ring_table
= (void *)flow_ring_table
;
332 dhdp
->if_flow_lkup
= (void *)if_flow_lkup
;
333 dhdp
->flowid_lock
= lock
;
334 dhdp
->flow_rings_inited
= TRUE
;
335 dhdp
->flowring_list_lock
= list_lock
;
336 DHD_FLOWID_UNLOCK(lock
, flags
);
338 DHD_INFO(("%s done\n", __FUNCTION__
));
342 /* deinit the spinlock */
343 dhd_os_spin_lock_deinit(dhdp
->osh
, lock
);
346 /* Destruct the per interface flow lkup table */
347 if (if_flow_lkup
!= NULL
) {
348 DHD_OS_PREFREE(dhdp
, if_flow_lkup
, if_flow_lkup_sz
);
350 if (flow_ring_table
!= NULL
) {
351 for (idx
= 0; idx
< num_flow_rings
; idx
++) {
352 if (flow_ring_table
[idx
].lock
!= NULL
)
353 dhd_os_spin_lock_deinit(dhdp
->osh
, flow_ring_table
[idx
].lock
);
355 MFREE(dhdp
->osh
, flow_ring_table
, flow_ring_table_sz
);
357 id16_map_fini(dhdp
->osh
, flowid_allocator
);
362 /** Deinit Flow Ring specific data structures */
363 void dhd_flow_rings_deinit(dhd_pub_t
*dhdp
)
366 uint32 flow_ring_table_sz
;
367 uint32 if_flow_lkup_sz
;
368 flow_ring_table_t
*flow_ring_table
;
372 DHD_INFO(("dhd_flow_rings_deinit\n"));
374 if (!(dhdp
->flow_rings_inited
)) {
375 DHD_ERROR(("dhd_flow_rings not initialized!\n"));
379 if (dhdp
->flow_ring_table
!= NULL
) {
381 ASSERT(dhdp
->num_flow_rings
> 0);
383 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
384 flow_ring_table
= (flow_ring_table_t
*)dhdp
->flow_ring_table
;
385 dhdp
->flow_ring_table
= NULL
;
386 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
387 for (idx
= 0; idx
< dhdp
->num_flow_rings
; idx
++) {
388 if (flow_ring_table
[idx
].active
) {
389 dhd_bus_clean_flow_ring(dhdp
->bus
, &flow_ring_table
[idx
]);
391 ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table
[idx
].queue
));
393 /* Deinit flow ring queue locks before destroying flow ring table */
394 dhd_os_spin_lock_deinit(dhdp
->osh
, flow_ring_table
[idx
].lock
);
395 flow_ring_table
[idx
].lock
= NULL
;
399 /* Destruct the flow ring table */
400 flow_ring_table_sz
= dhdp
->num_flow_rings
* sizeof(flow_ring_table_t
);
401 MFREE(dhdp
->osh
, flow_ring_table
, flow_ring_table_sz
);
404 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
406 /* Destruct the per interface flow lkup table */
407 if (dhdp
->if_flow_lkup
!= NULL
) {
408 if_flow_lkup_sz
= sizeof(if_flow_lkup_t
) * DHD_MAX_IFS
;
409 bzero((uchar
*)dhdp
->if_flow_lkup
, if_flow_lkup_sz
);
410 DHD_OS_PREFREE(dhdp
, dhdp
->if_flow_lkup
, if_flow_lkup_sz
);
411 dhdp
->if_flow_lkup
= NULL
;
414 /* Destruct the flowid allocator */
415 if (dhdp
->flowid_allocator
!= NULL
)
416 dhdp
->flowid_allocator
= id16_map_fini(dhdp
->osh
, dhdp
->flowid_allocator
);
418 dhdp
->num_flow_rings
= 0U;
419 bzero(dhdp
->flow_prio_map
, sizeof(uint8
) * NUMPRIO
);
421 lock
= dhdp
->flowid_lock
;
422 dhdp
->flowid_lock
= NULL
;
424 DHD_FLOWID_UNLOCK(lock
, flags
);
425 dhd_os_spin_lock_deinit(dhdp
->osh
, lock
);
427 dhd_os_spin_lock_deinit(dhdp
->osh
, dhdp
->flowring_list_lock
);
428 dhdp
->flowring_list_lock
= NULL
;
430 ASSERT(dhdp
->if_flow_lkup
== NULL
);
431 ASSERT(dhdp
->flowid_allocator
== NULL
);
432 ASSERT(dhdp
->flow_ring_table
== NULL
);
433 dhdp
->flow_rings_inited
= FALSE
;
436 /** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */
438 dhd_flow_rings_ifindex2role(dhd_pub_t
*dhdp
, uint8 ifindex
)
440 if_flow_lkup_t
*if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
441 ASSERT(if_flow_lkup
);
442 return if_flow_lkup
[ifindex
].role
;
446 bool is_tdls_destination(dhd_pub_t
*dhdp
, uint8
*da
)
448 tdls_peer_node_t
*cur
= dhdp
->peer_tbl
.node
;
449 while (cur
!= NULL
) {
450 if (!memcmp(da
, cur
->addr
, ETHER_ADDR_LEN
)) {
459 /** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */
461 dhd_flowid_find(dhd_pub_t
*dhdp
, uint8 ifindex
, uint8 prio
, char *sa
, char *da
)
464 bool ismcast
= FALSE
;
465 flow_hash_info_t
*cur
;
466 if_flow_lkup_t
*if_flow_lkup
;
469 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
470 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
472 ASSERT(if_flow_lkup
);
474 if (if_flow_lkup
[ifindex
].role
== WLC_E_IF_ROLE_STA
) {
476 if (dhdp
->peer_tbl
.tdls_peer_count
&& !(ETHER_ISMULTI(da
)) &&
477 is_tdls_destination(dhdp
, da
)) {
478 hash
= DHD_FLOWRING_HASHINDEX(da
, prio
);
479 cur
= if_flow_lkup
[ifindex
].fl_hash
[hash
];
480 while (cur
!= NULL
) {
481 if (!memcmp(cur
->flow_info
.da
, da
, ETHER_ADDR_LEN
)) {
482 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
487 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
488 return FLOWID_INVALID
;
491 cur
= if_flow_lkup
[ifindex
].fl_hash
[prio
];
493 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
498 if (ETHER_ISMULTI(da
)) {
502 hash
= DHD_FLOWRING_HASHINDEX(da
, prio
);
505 cur
= if_flow_lkup
[ifindex
].fl_hash
[hash
];
508 if ((ismcast
&& ETHER_ISMULTI(cur
->flow_info
.da
)) ||
509 (!memcmp(cur
->flow_info
.da
, da
, ETHER_ADDR_LEN
) &&
510 (cur
->flow_info
.tid
== prio
))) {
511 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
517 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
519 DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__
));
520 return FLOWID_INVALID
;
521 } /* dhd_flowid_find */
523 /** Create unique Flow ID, called when a flow ring is created. */
525 dhd_flowid_alloc(dhd_pub_t
*dhdp
, uint8 ifindex
, uint8 prio
, char *sa
, char *da
)
527 flow_hash_info_t
*fl_hash_node
, *cur
;
528 if_flow_lkup_t
*if_flow_lkup
;
533 fl_hash_node
= (flow_hash_info_t
*) MALLOC(dhdp
->osh
, sizeof(flow_hash_info_t
));
534 memcpy(fl_hash_node
->flow_info
.da
, da
, sizeof(fl_hash_node
->flow_info
.da
));
536 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
537 ASSERT(dhdp
->flowid_allocator
!= NULL
);
538 flowid
= id16_map_alloc(dhdp
->flowid_allocator
);
539 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
541 if (flowid
== FLOWID_INVALID
) {
542 MFREE(dhdp
->osh
, fl_hash_node
, sizeof(flow_hash_info_t
));
543 DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__
));
544 return FLOWID_INVALID
;
547 fl_hash_node
->flowid
= flowid
;
548 fl_hash_node
->flow_info
.tid
= prio
;
549 fl_hash_node
->flow_info
.ifindex
= ifindex
;
550 fl_hash_node
->next
= NULL
;
552 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
553 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
555 if (if_flow_lkup
[ifindex
].role
== WLC_E_IF_ROLE_STA
) {
556 /* For STA non TDLS dest we allocate entry based on prio only */
558 if (dhdp
->peer_tbl
.tdls_peer_count
&&
559 (is_tdls_destination(dhdp
, da
))) {
560 hash
= DHD_FLOWRING_HASHINDEX(da
, prio
);
561 cur
= if_flow_lkup
[ifindex
].fl_hash
[hash
];
566 cur
->next
= fl_hash_node
;
568 if_flow_lkup
[ifindex
].fl_hash
[hash
] = fl_hash_node
;
572 if_flow_lkup
[ifindex
].fl_hash
[prio
] = fl_hash_node
;
575 /* For bcast/mcast assign first slot in in interface */
576 hash
= ETHER_ISMULTI(da
) ? 0 : DHD_FLOWRING_HASHINDEX(da
, prio
);
577 cur
= if_flow_lkup
[ifindex
].fl_hash
[hash
];
582 cur
->next
= fl_hash_node
;
584 if_flow_lkup
[ifindex
].fl_hash
[hash
] = fl_hash_node
;
586 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
588 DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__
, fl_hash_node
->flowid
));
590 return fl_hash_node
->flowid
;
591 } /* dhd_flowid_alloc */
593 /** Get flow ring ID, if not present try to create one */
595 dhd_flowid_lookup(dhd_pub_t
*dhdp
, uint8 ifindex
,
596 uint8 prio
, char *sa
, char *da
, uint16
*flowid
)
599 flow_ring_node_t
*flow_ring_node
;
600 flow_ring_table_t
*flow_ring_table
;
604 DHD_INFO(("%s\n", __FUNCTION__
));
606 if (!dhdp
->flow_ring_table
) {
610 flow_ring_table
= (flow_ring_table_t
*)dhdp
->flow_ring_table
;
612 id
= dhd_flowid_find(dhdp
, ifindex
, prio
, sa
, da
);
614 if (id
== FLOWID_INVALID
) {
616 if_flow_lkup_t
*if_flow_lkup
;
617 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
619 if (!if_flow_lkup
[ifindex
].status
)
623 id
= dhd_flowid_alloc(dhdp
, ifindex
, prio
, sa
, da
);
624 if (id
== FLOWID_INVALID
) {
625 DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
626 __FUNCTION__
, ifindex
, if_flow_lkup
[ifindex
].status
));
630 /* register this flowid in dhd_pub */
631 dhd_add_flowid(dhdp
, ifindex
, prio
, da
, id
);
633 ASSERT(id
< dhdp
->num_flow_rings
);
635 flow_ring_node
= (flow_ring_node_t
*) &flow_ring_table
[id
];
637 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
640 memcpy(flow_ring_node
->flow_info
.sa
, sa
, sizeof(flow_ring_node
->flow_info
.sa
));
641 memcpy(flow_ring_node
->flow_info
.da
, da
, sizeof(flow_ring_node
->flow_info
.da
));
642 flow_ring_node
->flow_info
.tid
= prio
;
643 flow_ring_node
->flow_info
.ifindex
= ifindex
;
644 flow_ring_node
->active
= TRUE
;
645 flow_ring_node
->status
= FLOW_RING_STATUS_PENDING
;
646 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
648 /* Create and inform device about the new flow */
649 if (dhd_bus_flow_ring_create_request(dhdp
->bus
, (void *)flow_ring_node
)
651 DHD_ERROR(("%s: create error %d\n", __FUNCTION__
, id
));
658 /* if the Flow id was found in the hash */
659 ASSERT(id
< dhdp
->num_flow_rings
);
661 flow_ring_node
= (flow_ring_node_t
*) &flow_ring_table
[id
];
662 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
665 * If the flow_ring_node is in Open State or Status pending state then
666 * we can return the Flow id to the caller.If the flow_ring_node is in
667 * FLOW_RING_STATUS_PENDING this means the creation is in progress and
668 * hence the packets should be queued.
670 * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING Or
671 * FLOW_RING_STATUS_CLOSED, then we should return Error.
672 * Note that if the flowing is being deleted we would mark it as
673 * FLOW_RING_STATUS_DELETE_PENDING. Now before Dongle could respond and
674 * before we mark it as FLOW_RING_STATUS_CLOSED we could get tx packets.
675 * We should drop the packets in that case.
676 * The decission to return OK should NOT be based on 'active' variable, beause
677 * active is made TRUE when a flow_ring_node gets allocated and is made
678 * FALSE when the flow ring gets removed and does not reflect the True state
681 if (flow_ring_node
->status
== FLOW_RING_STATUS_OPEN
||
682 flow_ring_node
->status
== FLOW_RING_STATUS_PENDING
) {
686 *flowid
= FLOWID_INVALID
;
690 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
693 } /* Flow Id found in the hash */
694 } /* dhd_flowid_lookup */
697 * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to
698 * select the flowring to send the packet to the dongle.
701 dhd_flowid_update(dhd_pub_t
*dhdp
, uint8 ifindex
, uint8 prio
, void *pktbuf
)
703 uint8
*pktdata
= (uint8
*)PKTDATA(dhdp
->osh
, pktbuf
);
704 struct ether_header
*eh
= (struct ether_header
*)pktdata
;
707 ASSERT(ifindex
< DHD_MAX_IFS
);
709 if (ifindex
>= DHD_MAX_IFS
) {
713 if (!dhdp
->flowid_allocator
) {
714 DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__
));
718 if (dhd_flowid_lookup(dhdp
, ifindex
, prio
, eh
->ether_shost
, eh
->ether_dhost
,
719 &flowid
) != BCME_OK
) {
723 DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__
, prio
, flowid
));
725 /* Tag the packet with flowid */
726 DHD_PKT_SET_FLOWID(pktbuf
, flowid
);
731 dhd_flowid_free(dhd_pub_t
*dhdp
, uint8 ifindex
, uint16 flowid
)
735 flow_hash_info_t
*cur
, *prev
;
736 if_flow_lkup_t
*if_flow_lkup
;
739 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
740 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
742 for (hashix
= 0; hashix
< DHD_FLOWRING_HASH_SIZE
; hashix
++) {
744 cur
= if_flow_lkup
[ifindex
].fl_hash
[hashix
];
747 if (cur
->flowid
== flowid
) {
752 while (!found
&& cur
) {
753 if (cur
->flowid
== flowid
) {
762 if_flow_lkup
[ifindex
].fl_hash
[hashix
] = cur
->next
;
764 prev
->next
= cur
->next
;
767 /* deregister flowid from dhd_pub. */
768 dhd_del_flowid(dhdp
, ifindex
, flowid
);
770 id16_map_free(dhdp
->flowid_allocator
, flowid
);
771 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
772 MFREE(dhdp
->osh
, cur
, sizeof(flow_hash_info_t
));
779 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
780 DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
781 __FUNCTION__
, flowid
));
782 } /* dhd_flowid_free */
785 * Delete all Flow rings associated with the given interface. Is called when e.g. the dongle
786 * indicates that a wireless link has gone down.
789 dhd_flow_rings_delete(dhd_pub_t
*dhdp
, uint8 ifindex
)
792 flow_ring_table_t
*flow_ring_table
;
794 DHD_INFO(("%s: ifindex %u\n", __FUNCTION__
, ifindex
));
796 ASSERT(ifindex
< DHD_MAX_IFS
);
797 if (ifindex
>= DHD_MAX_IFS
)
800 if (!dhdp
->flow_ring_table
)
803 flow_ring_table
= (flow_ring_table_t
*)dhdp
->flow_ring_table
;
804 for (id
= 0; id
< dhdp
->num_flow_rings
; id
++) {
805 if (flow_ring_table
[id
].active
&&
806 (flow_ring_table
[id
].flow_info
.ifindex
== ifindex
)) {
807 dhd_bus_flow_ring_delete_request(dhdp
->bus
,
808 (void *) &flow_ring_table
[id
]);
813 /** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS functionality. */
815 dhd_flow_rings_delete_for_peer(dhd_pub_t
*dhdp
, uint8 ifindex
, char *addr
)
818 flow_ring_table_t
*flow_ring_table
;
820 DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__
, ifindex
));
822 ASSERT(ifindex
< DHD_MAX_IFS
);
823 if (ifindex
>= DHD_MAX_IFS
)
826 if (!dhdp
->flow_ring_table
)
829 flow_ring_table
= (flow_ring_table_t
*)dhdp
->flow_ring_table
;
830 for (id
= 0; id
< dhdp
->num_flow_rings
; id
++) {
831 if (flow_ring_table
[id
].active
&&
832 (flow_ring_table
[id
].flow_info
.ifindex
== ifindex
) &&
833 (!memcmp(flow_ring_table
[id
].flow_info
.da
, addr
, ETHER_ADDR_LEN
)) &&
834 (flow_ring_table
[id
].status
!= FLOW_RING_STATUS_DELETE_PENDING
)) {
835 DHD_INFO(("%s: deleting flowid %d\n",
836 __FUNCTION__
, flow_ring_table
[id
].flowid
));
837 dhd_bus_flow_ring_delete_request(dhdp
->bus
,
838 (void *) &flow_ring_table
[id
]);
843 /** Handles interface ADD, CHANGE, DEL indications from the dongle */
845 dhd_update_interface_flow_info(dhd_pub_t
*dhdp
, uint8 ifindex
,
846 uint8 op
, uint8 role
)
848 if_flow_lkup_t
*if_flow_lkup
;
851 ASSERT(ifindex
< DHD_MAX_IFS
);
852 if (ifindex
>= DHD_MAX_IFS
)
855 DHD_INFO(("%s: ifindex %u op %u role is %u \n",
856 __FUNCTION__
, ifindex
, op
, role
));
857 if (!dhdp
->flowid_allocator
) {
858 DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__
));
862 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
863 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
865 if (op
== WLC_E_IF_ADD
|| op
== WLC_E_IF_CHANGE
) {
867 if_flow_lkup
[ifindex
].role
= role
;
869 if (role
!= WLC_E_IF_ROLE_STA
) {
870 if_flow_lkup
[ifindex
].status
= TRUE
;
871 DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n",
872 __FUNCTION__
, ifindex
, role
));
873 /* Create Mcast Flow */
875 } else if (op
== WLC_E_IF_DEL
) {
876 if_flow_lkup
[ifindex
].status
= FALSE
;
877 DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n",
878 __FUNCTION__
, ifindex
, role
));
880 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
883 /** Handles a STA 'link' indication from the dongle */
885 dhd_update_interface_link_status(dhd_pub_t
*dhdp
, uint8 ifindex
, uint8 status
)
887 if_flow_lkup_t
*if_flow_lkup
;
890 ASSERT(ifindex
< DHD_MAX_IFS
);
891 if (ifindex
>= DHD_MAX_IFS
)
894 DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__
, ifindex
, status
));
896 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
897 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
899 if (if_flow_lkup
[ifindex
].role
== WLC_E_IF_ROLE_STA
) {
901 if_flow_lkup
[ifindex
].status
= TRUE
;
903 if_flow_lkup
[ifindex
].status
= FALSE
;
905 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
910 /** Update flow priority mapping, called on IOVAR */
911 int dhd_update_flow_prio_map(dhd_pub_t
*dhdp
, uint8 map
)
914 flow_ring_node_t
*flow_ring_node
;
916 if (map
> DHD_FLOW_PRIO_LLR_MAP
)
917 return BCME_BADOPTION
;
919 /* Check if we need to change prio map */
920 if (map
== dhdp
->flow_prio_map_type
)
923 /* If any ring is active we cannot change priority mapping for flow rings */
924 for (flowid
= 0; flowid
< dhdp
->num_flow_rings
; flowid
++) {
925 flow_ring_node
= DHD_FLOW_RING(dhdp
, flowid
);
926 if (flow_ring_node
->active
)
930 /* Inform firmware about new mapping type */
931 if (BCME_OK
!= dhd_flow_prio_map(dhdp
, &map
, TRUE
))
934 /* update internal structures */
935 dhdp
->flow_prio_map_type
= map
;
936 if (dhdp
->flow_prio_map_type
== DHD_FLOW_PRIO_TID_MAP
)
937 bcopy(prio2tid
, dhdp
->flow_prio_map
, sizeof(uint8
) * NUMPRIO
);
939 bcopy(prio2ac
, dhdp
->flow_prio_map
, sizeof(uint8
) * NUMPRIO
);
944 /** Inform firmware on updated flow priority mapping, called on IOVAR */
945 int dhd_flow_prio_map(dhd_pub_t
*dhd
, uint8
*map
, bool set
)
949 bcm_mkiovar("bus:fl_prio_map", NULL
, 0, (char*)iovbuf
, sizeof(iovbuf
));
950 if (dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), FALSE
, 0) < 0) {
951 DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__
));
957 bcm_mkiovar("bus:fl_prio_map", (char *)map
, 4, (char*)iovbuf
, sizeof(iovbuf
));
958 if (dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0) < 0) {
959 DHD_ERROR(("%s: failed to set fl_prio_map \n",