/*
 * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
 *
 * Flow rings are transmit traffic (=propagating towards antenna) related entities
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_flowring.c 710862 2017-07-14 07:43:59Z $
 */
36 #include <bcmendian.h>
41 #include <dngl_stats.h>
45 #include <dhd_flowring.h>
47 #include <dhd_proto.h>
50 #include <pcie_core.h>
51 #include <bcmmsgbuf.h>
54 static INLINE
int dhd_flow_queue_throttle(flow_queue_t
*queue
);
56 static INLINE uint16
dhd_flowid_find(dhd_pub_t
*dhdp
, uint8 ifindex
,
57 uint8 prio
, char *sa
, char *da
);
59 static INLINE uint16
dhd_flowid_alloc(dhd_pub_t
*dhdp
, uint8 ifindex
,
60 uint8 prio
, char *sa
, char *da
);
62 static INLINE
int dhd_flowid_lookup(dhd_pub_t
*dhdp
, uint8 ifindex
,
63 uint8 prio
, char *sa
, char *da
, uint16
*flowid
);
64 int BCMFASTPATH
dhd_flow_queue_overflow(flow_queue_t
*queue
, void *pkt
);
66 #define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p)
67 #define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x))
69 #if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING)
70 const uint8 prio2ac
[8] = { 0, 1, 1, 0, 2, 2, 3, 7 };
72 const uint8 prio2ac
[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
73 #endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */
74 const uint8 prio2tid
[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
76 /** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
78 dhd_flow_queue_throttle(flow_queue_t
*queue
)
80 return DHD_FLOW_QUEUE_FULL(queue
);
84 dhd_flow_queue_overflow(flow_queue_t
*queue
, void *pkt
)
86 return BCME_NORESOURCE
;
89 /** Returns flow ring given a flowid */
91 dhd_flow_ring_node(dhd_pub_t
*dhdp
, uint16 flowid
)
93 flow_ring_node_t
* flow_ring_node
;
95 ASSERT(dhdp
!= (dhd_pub_t
*)NULL
);
96 ASSERT(flowid
< dhdp
->num_flow_rings
);
98 flow_ring_node
= &(((flow_ring_node_t
*)(dhdp
->flow_ring_table
))[flowid
]);
100 ASSERT(flow_ring_node
->flowid
== flowid
);
101 return flow_ring_node
;
104 /** Returns 'backup' queue given a flowid */
106 dhd_flow_queue(dhd_pub_t
*dhdp
, uint16 flowid
)
108 flow_ring_node_t
* flow_ring_node
;
110 flow_ring_node
= dhd_flow_ring_node(dhdp
, flowid
);
111 return &flow_ring_node
->queue
;
114 /* Flow ring's queue management functions */
116 /** Reinitialize a flow ring's queue. */
118 dhd_flow_queue_reinit(dhd_pub_t
*dhdp
, flow_queue_t
*queue
, int max
)
120 ASSERT((queue
!= NULL
) && (max
> 0));
122 queue
->head
= queue
->tail
= NULL
;
125 /* Set queue's threshold and queue's parent cummulative length counter */
127 DHD_FLOW_QUEUE_SET_MAX(queue
, max
);
128 DHD_FLOW_QUEUE_SET_THRESHOLD(queue
, max
);
129 DHD_FLOW_QUEUE_SET_CLEN(queue
, &dhdp
->cumm_ctr
);
130 DHD_FLOW_QUEUE_SET_L2CLEN(queue
, &dhdp
->l2cumm_ctr
);
132 queue
->failures
= 0U;
133 queue
->cb
= &dhd_flow_queue_overflow
;
136 /** Initialize a flow ring's queue, called on driver initialization. */
138 dhd_flow_queue_init(dhd_pub_t
*dhdp
, flow_queue_t
*queue
, int max
)
140 ASSERT((queue
!= NULL
) && (max
> 0));
142 dll_init(&queue
->list
);
143 dhd_flow_queue_reinit(dhdp
, queue
, max
);
146 /** Register an enqueue overflow callback handler */
148 dhd_flow_queue_register(flow_queue_t
*queue
, flow_queue_cb_t cb
)
150 ASSERT(queue
!= NULL
);
155 * Enqueue an 802.3 packet at the back of a flow ring's queue. From there, it will travel later on
156 * to the flow ring itself.
159 dhd_flow_queue_enqueue(dhd_pub_t
*dhdp
, flow_queue_t
*queue
, void *pkt
)
163 ASSERT(queue
!= NULL
);
165 if (dhd_flow_queue_throttle(queue
)) {
167 ret
= (*queue
->cb
)(queue
, pkt
);
172 FLOW_QUEUE_PKT_SETNEXT(queue
->tail
, pkt
);
177 FLOW_QUEUE_PKT_SETNEXT(pkt
, NULL
);
179 queue
->tail
= pkt
; /* at tail */
182 /* increment parent's cummulative length */
183 DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue
));
184 /* increment grandparent's cummulative length */
185 DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue
));
191 /** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */
193 dhd_flow_queue_dequeue(dhd_pub_t
*dhdp
, flow_queue_t
*queue
)
197 ASSERT(queue
!= NULL
);
199 pkt
= queue
->head
; /* from head */
202 ASSERT((queue
->len
== 0) && (queue
->tail
== NULL
));
206 queue
->head
= FLOW_QUEUE_PKT_NEXT(pkt
);
207 if (queue
->head
== NULL
)
211 /* decrement parent's cummulative length */
212 DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue
));
213 /* decrement grandparent's cummulative length */
214 DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue
));
216 FLOW_QUEUE_PKT_SETNEXT(pkt
, NULL
); /* dettach packet from queue */
222 /** Reinsert a dequeued 802.3 packet back at the head */
224 dhd_flow_queue_reinsert(dhd_pub_t
*dhdp
, flow_queue_t
*queue
, void *pkt
)
226 if (queue
->head
== NULL
) {
230 FLOW_QUEUE_PKT_SETNEXT(pkt
, queue
->head
);
233 /* increment parent's cummulative length */
234 DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue
));
235 /* increment grandparent's cummulative length */
236 DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue
));
239 /** Fetch the backup queue for a flowring, and assign flow control thresholds */
241 dhd_flow_ring_config_thresholds(dhd_pub_t
*dhdp
, uint16 flowid
,
242 int queue_budget
, int cumm_threshold
, void *cumm_ctr
,
243 int l2cumm_threshold
, void *l2cumm_ctr
)
245 flow_queue_t
* queue
;
247 ASSERT(dhdp
!= (dhd_pub_t
*)NULL
);
248 ASSERT(queue_budget
> 1);
249 ASSERT(cumm_threshold
> 1);
250 ASSERT(cumm_ctr
!= (void*)NULL
);
251 ASSERT(l2cumm_threshold
> 1);
252 ASSERT(l2cumm_ctr
!= (void*)NULL
);
254 queue
= dhd_flow_queue(dhdp
, flowid
);
256 DHD_FLOW_QUEUE_SET_MAX(queue
, queue_budget
); /* Max queue length */
258 /* Set the queue's parent threshold and cummulative counter */
259 DHD_FLOW_QUEUE_SET_THRESHOLD(queue
, cumm_threshold
);
260 DHD_FLOW_QUEUE_SET_CLEN(queue
, cumm_ctr
);
262 /* Set the queue's grandparent threshold and cummulative counter */
263 DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue
, l2cumm_threshold
);
264 DHD_FLOW_QUEUE_SET_L2CLEN(queue
, l2cumm_ctr
);
267 /** Initializes data structures of multiple flow rings */
269 dhd_flow_rings_init(dhd_pub_t
*dhdp
, uint32 num_flow_rings
)
272 uint32 flow_ring_table_sz
;
273 uint32 if_flow_lkup_sz
= 0;
274 void * flowid_allocator
;
275 flow_ring_table_t
*flow_ring_table
= NULL
;
276 if_flow_lkup_t
*if_flow_lkup
= NULL
;
278 void *list_lock
= NULL
;
281 DHD_INFO(("%s\n", __FUNCTION__
));
283 /* Construct a 16bit flowid allocator */
284 flowid_allocator
= id16_map_init(dhdp
->osh
,
285 num_flow_rings
- dhdp
->bus
->max_cmn_rings
, FLOWID_RESERVED
);
286 if (flowid_allocator
== NULL
) {
287 DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__
));
291 /* Allocate a flow ring table, comprising of requested number of rings */
292 flow_ring_table_sz
= (num_flow_rings
* sizeof(flow_ring_node_t
));
293 flow_ring_table
= (flow_ring_table_t
*)MALLOCZ(dhdp
->osh
, flow_ring_table_sz
);
294 if (flow_ring_table
== NULL
) {
295 DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__
));
299 /* Initialize flow ring table state */
300 DHD_CUMM_CTR_INIT(&dhdp
->cumm_ctr
);
301 DHD_CUMM_CTR_INIT(&dhdp
->l2cumm_ctr
);
302 bzero((uchar
*)flow_ring_table
, flow_ring_table_sz
);
303 for (idx
= 0; idx
< num_flow_rings
; idx
++) {
304 flow_ring_table
[idx
].status
= FLOW_RING_STATUS_CLOSED
;
305 flow_ring_table
[idx
].flowid
= (uint16
)idx
;
306 flow_ring_table
[idx
].lock
= dhd_os_spin_lock_init(dhdp
->osh
);
307 #ifdef IDLE_TX_FLOW_MGMT
308 flow_ring_table
[idx
].last_active_ts
= OSL_SYSUPTIME();
309 #endif /* IDLE_TX_FLOW_MGMT */
310 if (flow_ring_table
[idx
].lock
== NULL
) {
311 DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__
));
315 dll_init(&flow_ring_table
[idx
].list
);
317 /* Initialize the per flow ring backup queue */
318 dhd_flow_queue_init(dhdp
, &flow_ring_table
[idx
].queue
,
319 FLOW_RING_QUEUE_THRESHOLD
);
322 /* Allocate per interface hash table (for fast lookup from interface to flow ring) */
323 if_flow_lkup_sz
= sizeof(if_flow_lkup_t
) * DHD_MAX_IFS
;
324 if_flow_lkup
= (if_flow_lkup_t
*)DHD_OS_PREALLOC(dhdp
,
325 DHD_PREALLOC_IF_FLOW_LKUP
, if_flow_lkup_sz
);
326 if (if_flow_lkup
== NULL
) {
327 DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__
));
331 /* Initialize per interface hash table */
332 for (idx
= 0; idx
< DHD_MAX_IFS
; idx
++) {
334 if_flow_lkup
[idx
].status
= 0;
335 if_flow_lkup
[idx
].role
= 0;
336 for (hash_ix
= 0; hash_ix
< DHD_FLOWRING_HASH_SIZE
; hash_ix
++)
337 if_flow_lkup
[idx
].fl_hash
[hash_ix
] = NULL
;
340 lock
= dhd_os_spin_lock_init(dhdp
->osh
);
344 list_lock
= dhd_os_spin_lock_init(dhdp
->osh
);
345 if (list_lock
== NULL
)
348 dhdp
->flow_prio_map_type
= DHD_FLOW_PRIO_AC_MAP
;
349 bcopy(prio2ac
, dhdp
->flow_prio_map
, sizeof(uint8
) * NUMPRIO
);
350 #ifdef DHD_LOSSLESS_ROAMING
351 dhdp
->dequeue_prec_map
= ALLPRIO
;
353 /* Now populate into dhd pub */
354 DHD_FLOWID_LOCK(lock
, flags
);
355 dhdp
->num_flow_rings
= num_flow_rings
;
356 dhdp
->flowid_allocator
= (void *)flowid_allocator
;
357 dhdp
->flow_ring_table
= (void *)flow_ring_table
;
358 dhdp
->if_flow_lkup
= (void *)if_flow_lkup
;
359 dhdp
->flowid_lock
= lock
;
360 dhdp
->flow_rings_inited
= TRUE
;
361 dhdp
->flowring_list_lock
= list_lock
;
362 DHD_FLOWID_UNLOCK(lock
, flags
);
364 DHD_INFO(("%s done\n", __FUNCTION__
));
368 /* deinit the spinlock */
369 dhd_os_spin_lock_deinit(dhdp
->osh
, lock
);
372 /* Destruct the per interface flow lkup table */
373 if (if_flow_lkup
!= NULL
) {
374 DHD_OS_PREFREE(dhdp
, if_flow_lkup
, if_flow_lkup_sz
);
376 if (flow_ring_table
!= NULL
) {
377 for (idx
= 0; idx
< num_flow_rings
; idx
++) {
378 if (flow_ring_table
[idx
].lock
!= NULL
)
379 dhd_os_spin_lock_deinit(dhdp
->osh
, flow_ring_table
[idx
].lock
);
381 MFREE(dhdp
->osh
, flow_ring_table
, flow_ring_table_sz
);
383 id16_map_fini(dhdp
->osh
, flowid_allocator
);
388 /** Deinit Flow Ring specific data structures */
389 void dhd_flow_rings_deinit(dhd_pub_t
*dhdp
)
392 uint32 flow_ring_table_sz
;
393 uint32 if_flow_lkup_sz
;
394 flow_ring_table_t
*flow_ring_table
;
398 DHD_INFO(("dhd_flow_rings_deinit\n"));
400 if (!(dhdp
->flow_rings_inited
)) {
401 DHD_ERROR(("dhd_flow_rings not initialized!\n"));
405 if (dhdp
->flow_ring_table
!= NULL
) {
407 ASSERT(dhdp
->num_flow_rings
> 0);
409 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
410 flow_ring_table
= (flow_ring_table_t
*)dhdp
->flow_ring_table
;
411 dhdp
->flow_ring_table
= NULL
;
412 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
413 for (idx
= 0; idx
< dhdp
->num_flow_rings
; idx
++) {
414 if (flow_ring_table
[idx
].active
) {
415 dhd_bus_clean_flow_ring(dhdp
->bus
, &flow_ring_table
[idx
]);
417 ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table
[idx
].queue
));
419 /* Deinit flow ring queue locks before destroying flow ring table */
420 if (flow_ring_table
[idx
].lock
!= NULL
) {
421 dhd_os_spin_lock_deinit(dhdp
->osh
, flow_ring_table
[idx
].lock
);
423 flow_ring_table
[idx
].lock
= NULL
;
427 /* Destruct the flow ring table */
428 flow_ring_table_sz
= dhdp
->num_flow_rings
* sizeof(flow_ring_table_t
);
429 MFREE(dhdp
->osh
, flow_ring_table
, flow_ring_table_sz
);
432 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
434 /* Destruct the per interface flow lkup table */
435 if (dhdp
->if_flow_lkup
!= NULL
) {
436 if_flow_lkup_sz
= sizeof(if_flow_lkup_t
) * DHD_MAX_IFS
;
437 bzero((uchar
*)dhdp
->if_flow_lkup
, if_flow_lkup_sz
);
438 DHD_OS_PREFREE(dhdp
, dhdp
->if_flow_lkup
, if_flow_lkup_sz
);
439 dhdp
->if_flow_lkup
= NULL
;
442 /* Destruct the flowid allocator */
443 if (dhdp
->flowid_allocator
!= NULL
)
444 dhdp
->flowid_allocator
= id16_map_fini(dhdp
->osh
, dhdp
->flowid_allocator
);
446 dhdp
->num_flow_rings
= 0U;
447 bzero(dhdp
->flow_prio_map
, sizeof(uint8
) * NUMPRIO
);
449 lock
= dhdp
->flowid_lock
;
450 dhdp
->flowid_lock
= NULL
;
453 DHD_FLOWID_UNLOCK(lock
, flags
);
454 dhd_os_spin_lock_deinit(dhdp
->osh
, lock
);
457 dhd_os_spin_lock_deinit(dhdp
->osh
, dhdp
->flowring_list_lock
);
458 dhdp
->flowring_list_lock
= NULL
;
460 ASSERT(dhdp
->if_flow_lkup
== NULL
);
461 ASSERT(dhdp
->flowid_allocator
== NULL
);
462 ASSERT(dhdp
->flow_ring_table
== NULL
);
463 dhdp
->flow_rings_inited
= FALSE
;
466 /** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */
468 dhd_flow_rings_ifindex2role(dhd_pub_t
*dhdp
, uint8 ifindex
)
470 if_flow_lkup_t
*if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
471 ASSERT(if_flow_lkup
);
472 return if_flow_lkup
[ifindex
].role
;
476 bool is_tdls_destination(dhd_pub_t
*dhdp
, uint8
*da
)
479 tdls_peer_node_t
*cur
= NULL
;
481 DHD_TDLS_LOCK(&dhdp
->tdls_lock
, flags
);
482 cur
= dhdp
->peer_tbl
.node
;
484 while (cur
!= NULL
) {
485 if (!memcmp(da
, cur
->addr
, ETHER_ADDR_LEN
)) {
486 DHD_TDLS_UNLOCK(&dhdp
->tdls_lock
, flags
);
491 DHD_TDLS_UNLOCK(&dhdp
->tdls_lock
, flags
);
496 /** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */
498 dhd_flowid_find(dhd_pub_t
*dhdp
, uint8 ifindex
, uint8 prio
, char *sa
, char *da
)
501 bool ismcast
= FALSE
;
502 flow_hash_info_t
*cur
;
503 if_flow_lkup_t
*if_flow_lkup
;
506 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
507 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
509 ASSERT(if_flow_lkup
);
511 if ((if_flow_lkup
[ifindex
].role
== WLC_E_IF_ROLE_STA
) ||
512 (if_flow_lkup
[ifindex
].role
== WLC_E_IF_ROLE_WDS
)) {
514 if (dhdp
->peer_tbl
.tdls_peer_count
&& !(ETHER_ISMULTI(da
)) &&
515 is_tdls_destination(dhdp
, da
)) {
516 hash
= DHD_FLOWRING_HASHINDEX(da
, prio
);
517 cur
= if_flow_lkup
[ifindex
].fl_hash
[hash
];
518 while (cur
!= NULL
) {
519 if (!memcmp(cur
->flow_info
.da
, da
, ETHER_ADDR_LEN
)) {
520 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
525 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
526 return FLOWID_INVALID
;
529 /* For STA non TDLS dest and WDS dest flow ring id is mapped based on prio only */
530 cur
= if_flow_lkup
[ifindex
].fl_hash
[prio
];
532 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
537 if (ETHER_ISMULTI(da
)) {
541 hash
= DHD_FLOWRING_HASHINDEX(da
, prio
);
544 cur
= if_flow_lkup
[ifindex
].fl_hash
[hash
];
547 if ((ismcast
&& ETHER_ISMULTI(cur
->flow_info
.da
)) ||
548 (!memcmp(cur
->flow_info
.da
, da
, ETHER_ADDR_LEN
) &&
549 (cur
->flow_info
.tid
== prio
))) {
550 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
556 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
558 DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__
));
559 return FLOWID_INVALID
;
560 } /* dhd_flowid_find */
562 /** Create unique Flow ID, called when a flow ring is created. */
564 dhd_flowid_alloc(dhd_pub_t
*dhdp
, uint8 ifindex
, uint8 prio
, char *sa
, char *da
)
566 flow_hash_info_t
*fl_hash_node
, *cur
;
567 if_flow_lkup_t
*if_flow_lkup
;
572 fl_hash_node
= (flow_hash_info_t
*) MALLOCZ(dhdp
->osh
, sizeof(flow_hash_info_t
));
573 if (fl_hash_node
== NULL
) {
574 DHD_ERROR(("%s: flow_hash_info_t memory allocation failed \n", __FUNCTION__
));
575 return FLOWID_INVALID
;
577 memcpy(fl_hash_node
->flow_info
.da
, da
, sizeof(fl_hash_node
->flow_info
.da
));
579 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
580 ASSERT(dhdp
->flowid_allocator
!= NULL
);
581 flowid
= id16_map_alloc(dhdp
->flowid_allocator
);
582 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
584 if (flowid
== FLOWID_INVALID
) {
585 MFREE(dhdp
->osh
, fl_hash_node
, sizeof(flow_hash_info_t
));
586 DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__
));
587 return FLOWID_INVALID
;
590 fl_hash_node
->flowid
= flowid
;
591 fl_hash_node
->flow_info
.tid
= prio
;
592 fl_hash_node
->flow_info
.ifindex
= ifindex
;
593 fl_hash_node
->next
= NULL
;
595 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
596 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
598 if ((if_flow_lkup
[ifindex
].role
== WLC_E_IF_ROLE_STA
) ||
599 (if_flow_lkup
[ifindex
].role
== WLC_E_IF_ROLE_WDS
)) {
600 /* For STA non TDLS dest and WDS dest we allocate entry based on prio only */
602 if (dhdp
->peer_tbl
.tdls_peer_count
&&
603 (is_tdls_destination(dhdp
, da
))) {
604 hash
= DHD_FLOWRING_HASHINDEX(da
, prio
);
605 cur
= if_flow_lkup
[ifindex
].fl_hash
[hash
];
610 cur
->next
= fl_hash_node
;
612 if_flow_lkup
[ifindex
].fl_hash
[hash
] = fl_hash_node
;
616 if_flow_lkup
[ifindex
].fl_hash
[prio
] = fl_hash_node
;
619 /* For bcast/mcast assign first slot in in interface */
620 hash
= ETHER_ISMULTI(da
) ? 0 : DHD_FLOWRING_HASHINDEX(da
, prio
);
621 cur
= if_flow_lkup
[ifindex
].fl_hash
[hash
];
626 cur
->next
= fl_hash_node
;
628 if_flow_lkup
[ifindex
].fl_hash
[hash
] = fl_hash_node
;
630 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
632 DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__
, fl_hash_node
->flowid
));
634 return fl_hash_node
->flowid
;
635 } /* dhd_flowid_alloc */
637 /** Get flow ring ID, if not present try to create one */
639 dhd_flowid_lookup(dhd_pub_t
*dhdp
, uint8 ifindex
,
640 uint8 prio
, char *sa
, char *da
, uint16
*flowid
)
643 flow_ring_node_t
*flow_ring_node
;
644 flow_ring_table_t
*flow_ring_table
;
649 DHD_INFO(("%s\n", __FUNCTION__
));
650 if (!dhdp
->flow_ring_table
) {
654 flow_ring_table
= (flow_ring_table_t
*)dhdp
->flow_ring_table
;
656 id
= dhd_flowid_find(dhdp
, ifindex
, prio
, sa
, da
);
658 if (id
== FLOWID_INVALID
) {
660 if_flow_lkup_t
*if_flow_lkup
;
661 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
663 if (!if_flow_lkup
[ifindex
].status
)
665 BCM_REFERENCE(is_sta_assoc
);
666 #if defined(PCIE_FULL_DONGLE)
667 is_sta_assoc
= dhd_sta_associated(dhdp
, ifindex
, (uint8
*)da
);
668 DHD_ERROR(("%s: multi %x ifindex %d role %x assoc %d\n", __FUNCTION__
,
669 ETHER_ISMULTI(da
), ifindex
, if_flow_lkup
[ifindex
].role
,
671 if (!ETHER_ISMULTI(da
) &&
672 ((if_flow_lkup
[ifindex
].role
== WLC_E_IF_ROLE_AP
) ||
673 (if_flow_lkup
[ifindex
].role
== WLC_E_IF_ROLE_P2P_GO
)) &&
676 #endif /* (linux || LINUX) && PCIE_FULL_DONGLE */
678 id
= dhd_flowid_alloc(dhdp
, ifindex
, prio
, sa
, da
);
679 if (id
== FLOWID_INVALID
) {
680 DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
681 __FUNCTION__
, ifindex
, if_flow_lkup
[ifindex
].status
));
685 ASSERT(id
< dhdp
->num_flow_rings
);
687 /* register this flowid in dhd_pub */
688 dhd_add_flowid(dhdp
, ifindex
, prio
, da
, id
);
690 flow_ring_node
= (flow_ring_node_t
*) &flow_ring_table
[id
];
692 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
695 memcpy(flow_ring_node
->flow_info
.sa
, sa
, sizeof(flow_ring_node
->flow_info
.sa
));
696 memcpy(flow_ring_node
->flow_info
.da
, da
, sizeof(flow_ring_node
->flow_info
.da
));
697 flow_ring_node
->flow_info
.tid
= prio
;
698 flow_ring_node
->flow_info
.ifindex
= ifindex
;
699 flow_ring_node
->active
= TRUE
;
700 flow_ring_node
->status
= FLOW_RING_STATUS_CREATE_PENDING
;
702 #ifdef DEVICE_TX_STUCK_DETECT
703 flow_ring_node
->tx_cmpl
= flow_ring_node
->tx_cmpl_prev
= OSL_SYSUPTIME();
704 flow_ring_node
->stuck_count
= 0;
705 #endif /* DEVICE_TX_STUCK_DETECT */
707 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
709 /* Create and inform device about the new flow */
710 if (dhd_bus_flow_ring_create_request(dhdp
->bus
, (void *)flow_ring_node
)
712 DHD_ERROR(("%s: create error %d\n", __FUNCTION__
, id
));
719 /* if the Flow id was found in the hash */
720 ASSERT(id
< dhdp
->num_flow_rings
);
722 flow_ring_node
= (flow_ring_node_t
*) &flow_ring_table
[id
];
723 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
726 * If the flow_ring_node is in Open State or Status pending state then
727 * we can return the Flow id to the caller.If the flow_ring_node is in
728 * FLOW_RING_STATUS_PENDING this means the creation is in progress and
729 * hence the packets should be queued.
731 * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING Or
732 * FLOW_RING_STATUS_CLOSED, then we should return Error.
733 * Note that if the flowing is being deleted we would mark it as
734 * FLOW_RING_STATUS_DELETE_PENDING. Now before Dongle could respond and
735 * before we mark it as FLOW_RING_STATUS_CLOSED we could get tx packets.
736 * We should drop the packets in that case.
737 * The decission to return OK should NOT be based on 'active' variable, beause
738 * active is made TRUE when a flow_ring_node gets allocated and is made
739 * FALSE when the flow ring gets removed and does not reflect the True state
741 * In case if IDLE_TX_FLOW_MGMT is defined, we have to handle two more flowring
742 * states. If the flow_ring_node's status is FLOW_RING_STATUS_SUSPENDED, the flowid
743 * is to be returned and from dhd_bus_txdata, the flowring would be resumed again.
744 * The status FLOW_RING_STATUS_RESUME_PENDING, is equivalent to
745 * FLOW_RING_STATUS_CREATE_PENDING.
747 if (flow_ring_node
->status
== FLOW_RING_STATUS_DELETE_PENDING
||
748 flow_ring_node
->status
== FLOW_RING_STATUS_CLOSED
) {
749 *flowid
= FLOWID_INVALID
;
756 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
758 } /* Flow Id found in the hash */
759 } /* dhd_flowid_lookup */
762 * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to
763 * select the flowring to send the packet to the dongle.
766 dhd_flowid_update(dhd_pub_t
*dhdp
, uint8 ifindex
, uint8 prio
, void *pktbuf
)
768 uint8
*pktdata
= (uint8
*)PKTDATA(dhdp
->osh
, pktbuf
);
769 struct ether_header
*eh
= (struct ether_header
*)pktdata
;
772 ASSERT(ifindex
< DHD_MAX_IFS
);
774 if (ifindex
>= DHD_MAX_IFS
) {
778 if (!dhdp
->flowid_allocator
) {
779 DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__
));
783 if (dhd_flowid_lookup(dhdp
, ifindex
, prio
, (char *)eh
->ether_shost
, (char *)eh
->ether_dhost
,
784 &flowid
) != BCME_OK
) {
788 DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__
, prio
, flowid
));
790 /* Tag the packet with flowid */
791 DHD_PKT_SET_FLOWID(pktbuf
, flowid
);
796 dhd_flowid_free(dhd_pub_t
*dhdp
, uint8 ifindex
, uint16 flowid
)
800 flow_hash_info_t
*cur
, *prev
;
801 if_flow_lkup_t
*if_flow_lkup
;
804 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
805 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
807 for (hashix
= 0; hashix
< DHD_FLOWRING_HASH_SIZE
; hashix
++) {
809 cur
= if_flow_lkup
[ifindex
].fl_hash
[hashix
];
812 if (cur
->flowid
== flowid
) {
817 while (!found
&& cur
) {
818 if (cur
->flowid
== flowid
) {
827 if_flow_lkup
[ifindex
].fl_hash
[hashix
] = cur
->next
;
829 prev
->next
= cur
->next
;
832 /* deregister flowid from dhd_pub. */
833 dhd_del_flowid(dhdp
, ifindex
, flowid
);
835 id16_map_free(dhdp
->flowid_allocator
, flowid
);
836 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
837 MFREE(dhdp
->osh
, cur
, sizeof(flow_hash_info_t
));
844 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
845 DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
846 __FUNCTION__
, flowid
));
847 } /* dhd_flowid_free */
850 * Delete all Flow rings associated with the given interface. Is called when eg the dongle
851 * indicates that a wireless link has gone down.
854 dhd_flow_rings_delete(dhd_pub_t
*dhdp
, uint8 ifindex
)
857 flow_ring_table_t
*flow_ring_table
;
859 DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__
, ifindex
));
861 ASSERT(ifindex
< DHD_MAX_IFS
);
862 if (ifindex
>= DHD_MAX_IFS
)
865 if (!dhdp
->flow_ring_table
)
868 flow_ring_table
= (flow_ring_table_t
*)dhdp
->flow_ring_table
;
869 for (id
= 0; id
< dhdp
->num_flow_rings
; id
++) {
870 if (flow_ring_table
[id
].active
&&
871 (flow_ring_table
[id
].flow_info
.ifindex
== ifindex
) &&
872 (flow_ring_table
[id
].status
== FLOW_RING_STATUS_OPEN
)) {
873 dhd_bus_flow_ring_delete_request(dhdp
->bus
,
874 (void *) &flow_ring_table
[id
]);
880 dhd_flow_rings_flush(dhd_pub_t
*dhdp
, uint8 ifindex
)
883 flow_ring_table_t
*flow_ring_table
;
885 DHD_INFO(("%s: ifindex %u\n", __FUNCTION__
, ifindex
));
887 ASSERT(ifindex
< DHD_MAX_IFS
);
888 if (ifindex
>= DHD_MAX_IFS
)
891 if (!dhdp
->flow_ring_table
)
893 flow_ring_table
= (flow_ring_table_t
*)dhdp
->flow_ring_table
;
895 for (id
= 0; id
<= dhdp
->num_flow_rings
; id
++) {
896 if (flow_ring_table
[id
].active
&&
897 (flow_ring_table
[id
].flow_info
.ifindex
== ifindex
) &&
898 (flow_ring_table
[id
].status
== FLOW_RING_STATUS_OPEN
)) {
899 dhd_bus_flow_ring_flush_request(dhdp
->bus
,
900 (void *) &flow_ring_table
[id
]);
906 /** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS functionality. */
908 dhd_flow_rings_delete_for_peer(dhd_pub_t
*dhdp
, uint8 ifindex
, char *addr
)
911 flow_ring_table_t
*flow_ring_table
;
913 DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__
, ifindex
));
915 ASSERT(ifindex
< DHD_MAX_IFS
);
916 if (ifindex
>= DHD_MAX_IFS
)
919 if (!dhdp
->flow_ring_table
)
922 flow_ring_table
= (flow_ring_table_t
*)dhdp
->flow_ring_table
;
923 for (id
= 0; id
< dhdp
->num_flow_rings
; id
++) {
924 if (flow_ring_table
[id
].active
&&
925 (flow_ring_table
[id
].flow_info
.ifindex
== ifindex
) &&
926 (!memcmp(flow_ring_table
[id
].flow_info
.da
, addr
, ETHER_ADDR_LEN
)) &&
927 (flow_ring_table
[id
].status
== FLOW_RING_STATUS_OPEN
)) {
928 DHD_ERROR(("%s: deleting flowid %d\n",
929 __FUNCTION__
, flow_ring_table
[id
].flowid
));
930 dhd_bus_flow_ring_delete_request(dhdp
->bus
,
931 (void *) &flow_ring_table
[id
]);
936 /** Handles interface ADD, CHANGE, DEL indications from the dongle */
938 dhd_update_interface_flow_info(dhd_pub_t
*dhdp
, uint8 ifindex
,
939 uint8 op
, uint8 role
)
941 if_flow_lkup_t
*if_flow_lkup
;
944 ASSERT(ifindex
< DHD_MAX_IFS
);
945 if (ifindex
>= DHD_MAX_IFS
)
948 DHD_ERROR(("%s: ifindex %u op %u role is %u \n",
949 __FUNCTION__
, ifindex
, op
, role
));
950 if (!dhdp
->flowid_allocator
) {
951 DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__
));
955 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
956 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
958 if (op
== WLC_E_IF_ADD
|| op
== WLC_E_IF_CHANGE
) {
960 if_flow_lkup
[ifindex
].role
= role
;
962 if (role
== WLC_E_IF_ROLE_WDS
) {
964 * WDS role does not send WLC_E_LINK event after interface is up.
965 * So to create flowrings for WDS, make status as TRUE in WLC_E_IF itself.
966 * same is true while making the status as FALSE.
967 * TODO: Fix FW to send WLC_E_LINK for WDS role aswell. So that all the
968 * interfaces are handled uniformly.
970 if_flow_lkup
[ifindex
].status
= TRUE
;
971 DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n",
972 __FUNCTION__
, ifindex
, role
));
974 } else if ((op
== WLC_E_IF_DEL
) && (role
== WLC_E_IF_ROLE_WDS
)) {
975 if_flow_lkup
[ifindex
].status
= FALSE
;
976 DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n",
977 __FUNCTION__
, ifindex
, role
));
979 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
982 /** Handles a STA 'link' indication from the dongle */
984 dhd_update_interface_link_status(dhd_pub_t
*dhdp
, uint8 ifindex
, uint8 status
)
986 if_flow_lkup_t
*if_flow_lkup
;
989 ASSERT(ifindex
< DHD_MAX_IFS
);
990 if (ifindex
>= DHD_MAX_IFS
)
993 DHD_ERROR(("%s: ifindex %d status %d\n", __FUNCTION__
, ifindex
, status
));
995 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
996 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
999 if_flow_lkup
[ifindex
].status
= TRUE
;
1001 if_flow_lkup
[ifindex
].status
= FALSE
;
1004 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
1009 /** Update flow priority mapping, called on IOVAR */
1010 int dhd_update_flow_prio_map(dhd_pub_t
*dhdp
, uint8 map
)
1013 flow_ring_node_t
*flow_ring_node
;
1015 if (map
> DHD_FLOW_PRIO_LLR_MAP
)
1016 return BCME_BADOPTION
;
1018 /* Check if we need to change prio map */
1019 if (map
== dhdp
->flow_prio_map_type
)
1022 /* If any ring is active we cannot change priority mapping for flow rings */
1023 for (flowid
= 0; flowid
< dhdp
->num_flow_rings
; flowid
++) {
1024 flow_ring_node
= DHD_FLOW_RING(dhdp
, flowid
);
1025 if (flow_ring_node
->active
)
1029 /* Inform firmware about new mapping type */
1030 if (BCME_OK
!= dhd_flow_prio_map(dhdp
, &map
, TRUE
))
1033 /* update internal structures */
1034 dhdp
->flow_prio_map_type
= map
;
1035 if (dhdp
->flow_prio_map_type
== DHD_FLOW_PRIO_TID_MAP
)
1036 bcopy(prio2tid
, dhdp
->flow_prio_map
, sizeof(uint8
) * NUMPRIO
);
1038 bcopy(prio2ac
, dhdp
->flow_prio_map
, sizeof(uint8
) * NUMPRIO
);
1043 /** Inform firmware on updated flow priority mapping, called on IOVAR */
1044 int dhd_flow_prio_map(dhd_pub_t
*dhd
, uint8
*map
, bool set
)
1046 uint8 iovbuf
[24] = {0};
1048 bcm_mkiovar("bus:fl_prio_map", NULL
, 0, (char*)iovbuf
, sizeof(iovbuf
));
1049 if (dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), FALSE
, 0) < 0) {
1050 DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__
));
1056 bcm_mkiovar("bus:fl_prio_map", (char *)map
, 4, (char*)iovbuf
, sizeof(iovbuf
));
1057 if (dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0) < 0) {
1058 DHD_ERROR(("%s: failed to set fl_prio_map \n",