/*
 * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
 *
 * Flow rings are transmit-traffic related entities, i.e. they carry traffic
 * propagating towards the antenna.
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_flowring.c 710862 2017-07-14 07:43:59Z $
 */


#include <typedefs.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>

#include <ethernet.h>
#include <bcmevent.h>
#include <dngl_stats.h>

#include <dhd.h>

#include <dhd_flowring.h>
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <802.1d.h>
#include <pcie_core.h>
#include <bcmmsgbuf.h>
#include <dhd_pcie.h>

static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue);

static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 prio, char *sa, char *da);

static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 prio, char *sa, char *da);

static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 prio, char *sa, char *da, uint16 *flowid);
int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt);

#define FLOW_QUEUE_PKT_NEXT(p)          PKTLINK(p)
#define FLOW_QUEUE_PKT_SETNEXT(p, x)    PKTSETLINK((p), (x))

#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING)
const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 };
#else
const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */
const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
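
/*
 * prio2ac[] maps an 802.1D user priority (0..7) to a WMM access category
 * (0 = AC_BE, 1 = AC_BK, 2 = AC_VI, 3 = AC_VO), e.g. priorities 1 and 2
 * (background) share AC_BK. With EAPOL_PKT_PRIO or DHD_LOSSLESS_ROAMING,
 * priority 7 maps to its own bucket instead of AC_VO, apparently so that
 * EAPOL/roam frames can be queued separately from voice traffic. prio2tid[]
 * is the identity map used when flows are kept per TID rather than per AC.
 */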

/** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
static INLINE int
dhd_flow_queue_throttle(flow_queue_t *queue)
{
    return DHD_FLOW_QUEUE_FULL(queue);
}

int BCMFASTPATH
dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt)
{
    return BCME_NORESOURCE;
}
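
/*
 * dhd_flow_queue_overflow() is the default enqueue-overflow callback that
 * dhd_flow_queue_reinit() installs on every queue; it simply rejects the
 * packet with BCME_NORESOURCE. A different per-queue policy can be installed
 * with dhd_flow_queue_register() below.
 */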

/** Returns flow ring given a flowid */
flow_ring_node_t *
dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid)
{
    flow_ring_node_t *flow_ring_node;

    ASSERT(dhdp != (dhd_pub_t *)NULL);
    ASSERT(flowid < dhdp->num_flow_rings);

    flow_ring_node = &(((flow_ring_node_t *)(dhdp->flow_ring_table))[flowid]);

    ASSERT(flow_ring_node->flowid == flowid);
    return flow_ring_node;
}

/** Returns 'backup' queue given a flowid */
flow_queue_t *
dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
{
    flow_ring_node_t *flow_ring_node;

    flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
    return &flow_ring_node->queue;
}

/* Flow ring's queue management functions */

/** Reinitialize a flow ring's queue. */
void
dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
{
    ASSERT((queue != NULL) && (max > 0));

    queue->head = queue->tail = NULL;
    queue->len = 0;

    /* Set queue's threshold and queue's parent cumulative length counter */
    ASSERT(max > 1);
    DHD_FLOW_QUEUE_SET_MAX(queue, max);
    DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max);
    DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr);
    DHD_FLOW_QUEUE_SET_L2CLEN(queue, &dhdp->l2cumm_ctr);

    queue->failures = 0U;
    queue->cb = &dhd_flow_queue_overflow;
}

/** Initialize a flow ring's queue, called on driver initialization. */
void
dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
{
    ASSERT((queue != NULL) && (max > 0));

    dll_init(&queue->list);
    dhd_flow_queue_reinit(dhdp, queue, max);
}

/** Register an enqueue overflow callback handler */
void
dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb)
{
    ASSERT(queue != NULL);
    queue->cb = cb;
}

/**
 * Enqueue an 802.3 packet at the back of a flow ring's queue. From there, it will later travel
 * to the flow ring itself.
 */
int BCMFASTPATH
dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
{
    int ret = BCME_OK;

    ASSERT(queue != NULL);

    if (dhd_flow_queue_throttle(queue)) {
        queue->failures++;
        ret = (*queue->cb)(queue, pkt);
        goto done;
    }

    if (queue->head) {
        FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt);
    } else {
        queue->head = pkt;
    }

    FLOW_QUEUE_PKT_SETNEXT(pkt, NULL);

    queue->tail = pkt; /* at tail */

    queue->len++;
    /* increment parent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
    /* increment grandparent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));

done:
    return ret;
}

/** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */
void * BCMFASTPATH
dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue)
{
    void *pkt;

    ASSERT(queue != NULL);

    pkt = queue->head; /* from head */

    if (pkt == NULL) {
        ASSERT((queue->len == 0) && (queue->tail == NULL));
        goto done;
    }

    queue->head = FLOW_QUEUE_PKT_NEXT(pkt);
    if (queue->head == NULL)
        queue->tail = NULL;

    queue->len--;
    /* decrement parent's cumulative length */
    DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
    /* decrement grandparent's cumulative length */
    DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));

    FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* detach packet from queue */

done:
    return pkt;
}

/** Reinsert a dequeued 802.3 packet back at the head */
void BCMFASTPATH
dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
{
    if (queue->head == NULL) {
        queue->tail = pkt;
    }

    FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
    queue->head = pkt;
    queue->len++;
    /* increment parent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
    /* increment grandparent's cumulative length */
    DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
}
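
/*
 * The backup queue is a plain singly-linked FIFO threaded through the
 * packets' PKTLINK field. A hypothetical caller (illustrative sketch only,
 * not code from this driver) would pair the calls roughly like:
 *
 *     flow_queue_t *q = dhd_flow_queue(dhdp, flowid);
 *     if (dhd_flow_queue_enqueue(dhdp, q, pkt) != BCME_OK)
 *         PKTFREE(dhdp->osh, pkt, TRUE);   // queue full: cb rejected the pkt
 *     ...
 *     while ((pkt = dhd_flow_queue_dequeue(dhdp, q)) != NULL)
 *         ... post pkt to the flow ring, dhd_flow_queue_reinsert() on failure
 */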

/** Fetch the backup queue for a flowring, and assign flow control thresholds */
void
dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
    int queue_budget, int cumm_threshold, void *cumm_ctr,
    int l2cumm_threshold, void *l2cumm_ctr)
{
    flow_queue_t *queue;

    ASSERT(dhdp != (dhd_pub_t *)NULL);
    ASSERT(queue_budget > 1);
    ASSERT(cumm_threshold > 1);
    ASSERT(cumm_ctr != (void *)NULL);
    ASSERT(l2cumm_threshold > 1);
    ASSERT(l2cumm_ctr != (void *)NULL);

    queue = dhd_flow_queue(dhdp, flowid);

    DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */

    /* Set the queue's parent threshold and cumulative counter */
    DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
    DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);

    /* Set the queue's grandparent threshold and cumulative counter */
    DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
    DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
}
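
/*
 * Taken together, dhd_flow_ring_config_thresholds() wires a queue into a
 * three-level accounting hierarchy: the queue's own budget (queue_budget), a
 * parent counter/threshold pair (cumm_ctr/cumm_threshold, by default
 * &dhdp->cumm_ctr from dhd_flow_queue_reinit()), and a grandparent pair
 * (l2cumm_ctr/l2cumm_threshold, by default &dhdp->l2cumm_ctr). Enqueue and
 * dequeue update all three counters; whether a given level actually
 * throttles depends on how DHD_FLOW_QUEUE_FULL() and the threshold macros
 * are defined in dhd_flowring.h.
 */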

/** Initializes data structures of multiple flow rings */
int
dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings)
{
    uint32 idx;
    uint32 flow_ring_table_sz;
    uint32 if_flow_lkup_sz = 0;
    void *flowid_allocator;
    flow_ring_table_t *flow_ring_table = NULL;
    if_flow_lkup_t *if_flow_lkup = NULL;
    void *lock = NULL;
    void *list_lock = NULL;
    unsigned long flags;

    DHD_INFO(("%s\n", __FUNCTION__));

    /* Construct a 16-bit flowid allocator */
    flowid_allocator = id16_map_init(dhdp->osh,
        num_flow_rings - dhdp->bus->max_cmn_rings, FLOWID_RESERVED);
    if (flowid_allocator == NULL) {
        DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
        return BCME_NOMEM;
    }

    /* Allocate a flow ring table comprising the requested number of rings */
    flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t));
    flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz);
    if (flow_ring_table == NULL) {
        DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__));
        goto fail;
    }

    /* Initialize flow ring table state */
    DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr);
    DHD_CUMM_CTR_INIT(&dhdp->l2cumm_ctr);
    bzero((uchar *)flow_ring_table, flow_ring_table_sz);
    for (idx = 0; idx < num_flow_rings; idx++) {
        flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
        flow_ring_table[idx].flowid = (uint16)idx;
        flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh);
#ifdef IDLE_TX_FLOW_MGMT
        flow_ring_table[idx].last_active_ts = OSL_SYSUPTIME();
#endif /* IDLE_TX_FLOW_MGMT */
        if (flow_ring_table[idx].lock == NULL) {
            DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
            goto fail;
        }

        dll_init(&flow_ring_table[idx].list);

        /* Initialize the per flow ring backup queue */
        dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue,
            FLOW_RING_QUEUE_THRESHOLD);
    }

    /* Allocate per interface hash table (for fast lookup from interface to flow ring) */
    if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
    if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp,
        DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz);
    if (if_flow_lkup == NULL) {
        DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__));
        goto fail;
    }

    /* Initialize per interface hash table */
    for (idx = 0; idx < DHD_MAX_IFS; idx++) {
        int hash_ix;
        if_flow_lkup[idx].status = 0;
        if_flow_lkup[idx].role = 0;
        for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++)
            if_flow_lkup[idx].fl_hash[hash_ix] = NULL;
    }

    lock = dhd_os_spin_lock_init(dhdp->osh);
    if (lock == NULL)
        goto fail;

    list_lock = dhd_os_spin_lock_init(dhdp->osh);
    if (list_lock == NULL)
        goto lock_fail;

    dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
    bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
#ifdef DHD_LOSSLESS_ROAMING
    dhdp->dequeue_prec_map = ALLPRIO;
#endif
    /* Now populate into dhd pub */
    DHD_FLOWID_LOCK(lock, flags);
    dhdp->num_flow_rings = num_flow_rings;
    dhdp->flowid_allocator = (void *)flowid_allocator;
    dhdp->flow_ring_table = (void *)flow_ring_table;
    dhdp->if_flow_lkup = (void *)if_flow_lkup;
    dhdp->flowid_lock = lock;
    dhdp->flow_rings_inited = TRUE;
    dhdp->flowring_list_lock = list_lock;
    DHD_FLOWID_UNLOCK(lock, flags);

    DHD_INFO(("%s done\n", __FUNCTION__));
    return BCME_OK;

lock_fail:
    /* deinit the spinlock */
    dhd_os_spin_lock_deinit(dhdp->osh, lock);

fail:
    /* Destruct the per interface flow lkup table */
    if (if_flow_lkup != NULL) {
        DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz);
    }
    if (flow_ring_table != NULL) {
        for (idx = 0; idx < num_flow_rings; idx++) {
            if (flow_ring_table[idx].lock != NULL)
                dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
        }
        MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
    }
    id16_map_fini(dhdp->osh, flowid_allocator);

    return BCME_NOMEM;
}
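
/*
 * Note on the allocator above: id16_map_init() is asked for at most
 * (num_flow_rings - max_cmn_rings) ids, starting from FLOWID_RESERVED, so
 * ids below FLOWID_RESERVED are never handed out to data flows. The
 * presumption is that the low ring ids are occupied by the common
 * (non flow ring) H2D/D2H message rings.
 */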

/** Deinit Flow Ring specific data structures */
void dhd_flow_rings_deinit(dhd_pub_t *dhdp)
{
    uint16 idx;
    uint32 flow_ring_table_sz;
    uint32 if_flow_lkup_sz;
    flow_ring_table_t *flow_ring_table;
    unsigned long flags;
    void *lock;

    DHD_INFO(("dhd_flow_rings_deinit\n"));

    if (!(dhdp->flow_rings_inited)) {
        DHD_ERROR(("dhd_flow_rings not initialized!\n"));
        return;
    }

    if (dhdp->flow_ring_table != NULL) {

        ASSERT(dhdp->num_flow_rings > 0);

        DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
        flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
        dhdp->flow_ring_table = NULL;
        DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
        for (idx = 0; idx < dhdp->num_flow_rings; idx++) {
            if (flow_ring_table[idx].active) {
                dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]);
            }
            ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue));

            /* Deinit flow ring queue locks before destroying flow ring table */
            if (flow_ring_table[idx].lock != NULL) {
                dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
            }
            flow_ring_table[idx].lock = NULL;
        }

        /* Destruct the flow ring table */
        flow_ring_table_sz = dhdp->num_flow_rings * sizeof(flow_ring_table_t);
        MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
    }

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);

    /* Destruct the per interface flow lkup table */
    if (dhdp->if_flow_lkup != NULL) {
        if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
        bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz);
        DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz);
        dhdp->if_flow_lkup = NULL;
    }

    /* Destruct the flowid allocator */
    if (dhdp->flowid_allocator != NULL)
        dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator);

    dhdp->num_flow_rings = 0U;
    bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

    lock = dhdp->flowid_lock;
    dhdp->flowid_lock = NULL;

    if (lock) {
        DHD_FLOWID_UNLOCK(lock, flags);
        dhd_os_spin_lock_deinit(dhdp->osh, lock);
    }

    dhd_os_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock);
    dhdp->flowring_list_lock = NULL;

    ASSERT(dhdp->if_flow_lkup == NULL);
    ASSERT(dhdp->flowid_allocator == NULL);
    ASSERT(dhdp->flow_ring_table == NULL);
    dhdp->flow_rings_inited = FALSE;
}

/** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */
uint8
dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex)
{
    if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
    ASSERT(if_flow_lkup);
    return if_flow_lkup[ifindex].role;
}

#ifdef WLTDLS
bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da)
{
    unsigned long flags;
    tdls_peer_node_t *cur = NULL;

    DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
    cur = dhdp->peer_tbl.node;

    while (cur != NULL) {
        if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
            DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
            return TRUE;
        }
        cur = cur->next;
    }
    DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
    return FALSE;
}
#endif /* WLTDLS */
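
/*
 * With WLTDLS, a STA interface can have direct (TDLS) peers in addition to
 * its AP link. is_tdls_destination() walks the driver's TDLS peer table so
 * that the lookup below can give such peers their own per-DA flow rings,
 * instead of the per-priority rings used for ordinary STA traffic.
 */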

/** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */
static INLINE uint16
dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
{
    int hash;
    bool ismcast = FALSE;
    flow_hash_info_t *cur;
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    ASSERT(if_flow_lkup);

    if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
        (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
#ifdef WLTDLS
        if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
            is_tdls_destination(dhdp, da)) {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
            cur = if_flow_lkup[ifindex].fl_hash[hash];
            while (cur != NULL) {
                if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) {
                    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                    return cur->flowid;
                }
                cur = cur->next;
            }
            DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
            return FLOWID_INVALID;
        }
#endif /* WLTDLS */
        /* For STA (non-TDLS dest) and WDS dest, the flow ring id is mapped on prio alone */
        cur = if_flow_lkup[ifindex].fl_hash[prio];
        if (cur) {
            DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
            return cur->flowid;
        }
    } else {

        if (ETHER_ISMULTI(da)) {
            ismcast = TRUE;
            hash = 0;
        } else {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
        }

        cur = if_flow_lkup[ifindex].fl_hash[hash];

        while (cur) {
            if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) ||
                (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) &&
                (cur->flow_info.tid == prio))) {
                DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                return cur->flowid;
            }
            cur = cur->next;
        }
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__));
    return FLOWID_INVALID;
} /* dhd_flowid_find */
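
/*
 * Summary of the hash strategy above: for STA/WDS roles the prio itself
 * indexes fl_hash[] (one flow ring per priority towards the single peer);
 * for AP-like roles, unicast flows hash on (da, prio) via
 * DHD_FLOWRING_HASHINDEX() and all multicast/broadcast traffic shares
 * bucket 0. dhd_flowid_alloc() below inserts nodes using the same rules,
 * so find and alloc stay consistent.
 */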

/** Create unique Flow ID, called when a flow ring is created. */
static INLINE uint16
dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
{
    flow_hash_info_t *fl_hash_node, *cur;
    if_flow_lkup_t *if_flow_lkup;
    int hash;
    uint16 flowid;
    unsigned long flags;

    fl_hash_node = (flow_hash_info_t *)MALLOCZ(dhdp->osh, sizeof(flow_hash_info_t));
    if (fl_hash_node == NULL) {
        DHD_ERROR(("%s: flow_hash_info_t memory allocation failed\n", __FUNCTION__));
        return FLOWID_INVALID;
    }
    memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    ASSERT(dhdp->flowid_allocator != NULL);
    flowid = id16_map_alloc(dhdp->flowid_allocator);
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    if (flowid == FLOWID_INVALID) {
        MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t));
        DHD_ERROR(("%s: cannot get free flowid\n", __FUNCTION__));
        return FLOWID_INVALID;
    }

    fl_hash_node->flowid = flowid;
    fl_hash_node->flow_info.tid = prio;
    fl_hash_node->flow_info.ifindex = ifindex;
    fl_hash_node->next = NULL;

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
        (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
        /* For STA (non-TDLS dest) and WDS dest we allocate the entry based on prio only */
#ifdef WLTDLS
        if (dhdp->peer_tbl.tdls_peer_count &&
            (is_tdls_destination(dhdp, da))) {
            hash = DHD_FLOWRING_HASHINDEX(da, prio);
            cur = if_flow_lkup[ifindex].fl_hash[hash];
            if (cur) {
                while (cur->next) {
                    cur = cur->next;
                }
                cur->next = fl_hash_node;
            } else {
                if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
            }
        } else
#endif /* WLTDLS */
            if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node;
    } else {

        /* For bcast/mcast assign the first slot in the interface's hash table */
        hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio);
        cur = if_flow_lkup[ifindex].fl_hash[hash];
        if (cur) {
            while (cur->next) {
                cur = cur->next;
            }
            cur->next = fl_hash_node;
        } else
            if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));

    return fl_hash_node->flowid;
} /* dhd_flowid_alloc */

/** Get flow ring ID, if not present try to create one */
static INLINE int
dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 prio, char *sa, char *da, uint16 *flowid)
{
    uint16 id;
    flow_ring_node_t *flow_ring_node;
    flow_ring_table_t *flow_ring_table;
    unsigned long flags;
    int ret;
    bool is_sta_assoc;

    DHD_INFO(("%s\n", __FUNCTION__));
    if (!dhdp->flow_ring_table) {
        return BCME_ERROR;
    }

    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;

    id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);

    if (id == FLOWID_INVALID) {

        if_flow_lkup_t *if_flow_lkup;
        if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

        if (!if_flow_lkup[ifindex].status)
            return BCME_ERROR;
        BCM_REFERENCE(is_sta_assoc);
#if defined(PCIE_FULL_DONGLE)
        is_sta_assoc = dhd_sta_associated(dhdp, ifindex, (uint8 *)da);
        DHD_ERROR(("%s: multi %x ifindex %d role %x assoc %d\n", __FUNCTION__,
            ETHER_ISMULTI(da), ifindex, if_flow_lkup[ifindex].role,
            is_sta_assoc));
        if (!ETHER_ISMULTI(da) &&
            ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_AP) ||
            (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_P2P_GO)) &&
            (!is_sta_assoc))
            return BCME_ERROR;
#endif /* PCIE_FULL_DONGLE */

        id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
        if (id == FLOWID_INVALID) {
            DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
                __FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
            return BCME_ERROR;
        }

        ASSERT(id < dhdp->num_flow_rings);

        /* register this flowid in dhd_pub */
        dhd_add_flowid(dhdp, ifindex, prio, da, id);

        flow_ring_node = (flow_ring_node_t *)&flow_ring_table[id];

        DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

        /* Init Flow info */
        memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa));
        memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da));
        flow_ring_node->flow_info.tid = prio;
        flow_ring_node->flow_info.ifindex = ifindex;
        flow_ring_node->active = TRUE;
        flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING;

#ifdef DEVICE_TX_STUCK_DETECT
        flow_ring_node->tx_cmpl = flow_ring_node->tx_cmpl_prev = OSL_SYSUPTIME();
        flow_ring_node->stuck_count = 0;
#endif /* DEVICE_TX_STUCK_DETECT */

        DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

        /* Create and inform device about the new flow */
        if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
            != BCME_OK) {
            DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
            return BCME_ERROR;
        }

        *flowid = id;
        return BCME_OK;
    } else {
        /* the flow id was found in the hash */
        ASSERT(id < dhdp->num_flow_rings);

        flow_ring_node = (flow_ring_node_t *)&flow_ring_table[id];
        DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

        /*
         * If the flow_ring_node is in the Open state or a status-pending
         * state, we can return the flow id to the caller. If the
         * flow_ring_node is in FLOW_RING_STATUS_PENDING, creation is still
         * in progress and the packets should be queued.
         *
         * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING or
         * FLOW_RING_STATUS_CLOSED, we should return an error. Note that when
         * a flow ring is being deleted we mark it
         * FLOW_RING_STATUS_DELETE_PENDING; before the dongle responds and
         * before we mark it FLOW_RING_STATUS_CLOSED, we could still get tx
         * packets. Those packets should be dropped.
         * The decision to return OK should NOT be based on the 'active'
         * variable, because 'active' is set TRUE when a flow_ring_node is
         * allocated and set FALSE when the flow ring is removed, and so does
         * not reflect the true state of the flow ring.
         * If IDLE_TX_FLOW_MGMT is defined, two more flowring states have to
         * be handled. If the flow_ring_node's status is
         * FLOW_RING_STATUS_SUSPENDED, the flowid is returned and the
         * flowring is resumed again from dhd_bus_txdata. The status
         * FLOW_RING_STATUS_RESUME_PENDING is equivalent to
         * FLOW_RING_STATUS_CREATE_PENDING.
         */
        if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING ||
            flow_ring_node->status == FLOW_RING_STATUS_CLOSED) {
            *flowid = FLOWID_INVALID;
            ret = BCME_ERROR;
        } else {
            *flowid = id;
            ret = BCME_OK;
        }

        DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
        return ret;
    } /* flow id found in the hash */
} /* dhd_flowid_lookup */

/**
 * Assign an existing or newly created flowid to an 802.3 packet. This flowid is later used to
 * select the flowring over which the packet is sent to the dongle.
 */
int BCMFASTPATH
dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf)
{
    uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
    struct ether_header *eh = (struct ether_header *)pktdata;
    uint16 flowid;

    ASSERT(ifindex < DHD_MAX_IFS);

    if (ifindex >= DHD_MAX_IFS) {
        return BCME_BADARG;
    }

    if (!dhdp->flowid_allocator) {
        DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
        return BCME_ERROR;
    }

    if (dhd_flowid_lookup(dhdp, ifindex, prio, (char *)eh->ether_shost, (char *)eh->ether_dhost,
        &flowid) != BCME_OK) {
        return BCME_ERROR;
    }

    DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));

    /* Tag the packet with flowid */
    DHD_PKT_SET_FLOWID(pktbuf, flowid);
    return BCME_OK;
}
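
/*
 * Illustrative sketch (an assumption about the surrounding transmit path,
 * not code from this file): the caller is expected to tag each packet before
 * handing it to the bus layer, which then reads the tag back to pick a ring:
 *
 *     if (dhd_flowid_update(dhdp, ifidx, prio, pktbuf) != BCME_OK) {
 *         PKTFREE(dhdp->osh, pktbuf, TRUE);   // no flow ring could be assigned
 *         return BCME_ERROR;
 *     }
 *     ret = dhd_bus_txdata(dhdp->bus, pktbuf, ifidx);
 *     // bus layer side: flowid = DHD_PKT_GET_FLOWID(pktbuf);
 */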

void
dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
{
    int hashix;
    bool found = FALSE;
    flow_hash_info_t *cur, *prev;
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {

        cur = if_flow_lkup[ifindex].fl_hash[hashix];

        if (cur) {
            if (cur->flowid == flowid) {
                found = TRUE;
            }

            prev = NULL;
            while (!found && cur) {
                if (cur->flowid == flowid) {
                    found = TRUE;
                    break;
                }
                prev = cur;
                cur = cur->next;
            }
            if (found) {
                if (!prev) {
                    if_flow_lkup[ifindex].fl_hash[hashix] = cur->next;
                } else {
                    prev->next = cur->next;
                }

                /* deregister flowid from dhd_pub. */
                dhd_del_flowid(dhdp, ifindex, flowid);

                id16_map_free(dhdp->flowid_allocator, flowid);
                DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
                MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t));

                return;
            }
        }
    }

    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
    DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
        __FUNCTION__, flowid));
} /* dhd_flowid_free */

/**
 * Delete all flow rings associated with the given interface. Called when, e.g., the dongle
 * indicates that a wireless link has gone down.
 */
void
dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
{
    uint32 id;
    flow_ring_table_t *flow_ring_table;

    DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;

    if (!dhdp->flow_ring_table)
        return;

    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
    for (id = 0; id < dhdp->num_flow_rings; id++) {
        if (flow_ring_table[id].active &&
            (flow_ring_table[id].flow_info.ifindex == ifindex) &&
            (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
            dhd_bus_flow_ring_delete_request(dhdp->bus,
                (void *)&flow_ring_table[id]);
        }
    }
}

void
dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex)
{
    uint32 id;
    flow_ring_table_t *flow_ring_table;

    DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex));

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;

    if (!dhdp->flow_ring_table)
        return;
    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;

    /* flow_ring_table holds exactly num_flow_rings entries */
    for (id = 0; id < dhdp->num_flow_rings; id++) {
        if (flow_ring_table[id].active &&
            (flow_ring_table[id].flow_info.ifindex == ifindex) &&
            (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
            dhd_bus_flow_ring_flush_request(dhdp->bus,
                (void *)&flow_ring_table[id]);
        }
    }
}


/** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS functionality. */
void
dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr)
{
    uint32 id;
    flow_ring_table_t *flow_ring_table;

    DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;

    if (!dhdp->flow_ring_table)
        return;

    flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
    for (id = 0; id < dhdp->num_flow_rings; id++) {
        if (flow_ring_table[id].active &&
            (flow_ring_table[id].flow_info.ifindex == ifindex) &&
            (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
            (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
            DHD_ERROR(("%s: deleting flowid %d\n",
                __FUNCTION__, flow_ring_table[id].flowid));
            dhd_bus_flow_ring_delete_request(dhdp->bus,
                (void *)&flow_ring_table[id]);
        }
    }
}

/** Handles interface ADD, CHANGE, DEL indications from the dongle */
void
dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
    uint8 op, uint8 role)
{
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return;

    DHD_ERROR(("%s: ifindex %u op %u role is %u\n",
        __FUNCTION__, ifindex, op, role));
    if (!dhdp->flowid_allocator) {
        DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
        return;
    }

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) {

        if_flow_lkup[ifindex].role = role;

        if (role == WLC_E_IF_ROLE_WDS) {
            /**
             * The WDS role does not send a WLC_E_LINK event after the interface
             * is up, so to allow flowring creation for WDS the status is set to
             * TRUE in WLC_E_IF itself; the same applies when setting the status
             * to FALSE.
             * TODO: Fix FW to send WLC_E_LINK for the WDS role as well, so that
             * all interfaces are handled uniformly.
             */
            if_flow_lkup[ifindex].status = TRUE;
            DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d\n",
                __FUNCTION__, ifindex, role));
        }
    } else if ((op == WLC_E_IF_DEL) && (role == WLC_E_IF_ROLE_WDS)) {
        if_flow_lkup[ifindex].status = FALSE;
        DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d\n",
            __FUNCTION__, ifindex, role));
    }
    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
}

/** Handles a STA 'link' indication from the dongle */
int
dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status)
{
    if_flow_lkup_t *if_flow_lkup;
    unsigned long flags;

    ASSERT(ifindex < DHD_MAX_IFS);
    if (ifindex >= DHD_MAX_IFS)
        return BCME_BADARG;

    DHD_ERROR(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));

    DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
    if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

    if (status) {
        if_flow_lkup[ifindex].status = TRUE;
    } else {
        if_flow_lkup[ifindex].status = FALSE;
    }

    DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

    return BCME_OK;
}

/** Update flow priority mapping, called on IOVAR */
int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map)
{
    uint16 flowid;
    flow_ring_node_t *flow_ring_node;

    if (map > DHD_FLOW_PRIO_LLR_MAP)
        return BCME_BADOPTION;

    /* Check if we need to change prio map */
    if (map == dhdp->flow_prio_map_type)
        return BCME_OK;

    /* If any ring is active we cannot change priority mapping for flow rings */
    for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
        flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
        if (flow_ring_node->active)
            return BCME_EPERM;
    }

    /* Inform firmware about new mapping type */
    if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE))
        return BCME_ERROR;

    /* update internal structures */
    dhdp->flow_prio_map_type = map;
    if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP)
        bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
    else
        bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

    return BCME_OK;
}
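
/*
 * The map types differ in ring granularity: DHD_FLOW_PRIO_AC_MAP buckets the
 * eight 802.1D priorities into the four WMM access categories (prio2ac),
 * while DHD_FLOW_PRIO_TID_MAP keeps one flow per TID (prio2tid).
 * DHD_FLOW_PRIO_LLR_MAP is presumably the lossless-roaming variant that
 * additionally keeps EAPOL-priority traffic separate; it also falls through
 * to prio2ac above.
 */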

/** Inform firmware on updated flow priority mapping, called on IOVAR */
int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
{
    uint8 iovbuf[24] = {0};

    if (!set) {
        bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char *)iovbuf, sizeof(iovbuf));
        if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
            DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
            return BCME_ERROR;
        }
        *map = iovbuf[0];
        return BCME_OK;
    }
    bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char *)iovbuf, sizeof(iovbuf));
    if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
        DHD_ERROR(("%s: failed to set fl_prio_map\n", __FUNCTION__));
        return BCME_ERROR;
    }
    return BCME_OK;
}