/*
 * bcmdhd.100.10.315.x/wl_event.c
 * LineageOS/G12 android_hardware_amlogic_kernel-modules_dhd-driver
 * commit: "dhd: make driver version configurable"
 */

#if defined(WL_EXT_IAPSTA) || defined(USE_IW)
#include <bcmendian.h>
#include <wl_android.h>
#include <dhd_config.h>

#define EVENT_ERROR(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_ERROR_LEVEL) { \
			printk(KERN_ERR "[dhd-%s] EVENT-ERROR) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)
#define EVENT_TRACE(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_TRACE_LEVEL) { \
			printk(KERN_INFO "[dhd-%s] EVENT-TRACE) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)
#define EVENT_DBG(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_DBG_LEVEL) { \
			printk(KERN_INFO "[dhd-%s] EVENT-DBG) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)
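
/*
 * Logging here is gated on android_msg_level rather than the usual DHD
 * message level. An illustrative call ("status" is a hypothetical local):
 *
 *	EVENT_TRACE(dev->name, "scan done, status=%d\n", status);
 *
 * expands to a printk(KERN_INFO ...) that only fires when
 * ANDROID_TRACE_LEVEL is enabled.
 */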

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
	4 && __GNUC_MINOR__ >= 6))
#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
(entry) = list_first_entry((ptr), type, member); \
_Pragma("GCC diagnostic pop") \

#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
entry = container_of((ptr), type, member); \
_Pragma("GCC diagnostic pop") \

#else
#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
(entry) = list_first_entry((ptr), type, member); \

#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
entry = container_of((ptr), type, member); \

#endif /* STRICT_GCC_WARNINGS */
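
/*
 * container_of()/list_first_entry() cast through (char *), which can trip
 * -Wcast-qual when the passed-in pointer is const-qualified; the _Pragma
 * wrappers suppress that one diagnostic on GCC >= 4.6 instead of
 * disabling it build-wide.
 */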

#ifdef DHD_MAX_IFS
#define WL_MAX_IFS DHD_MAX_IFS
#else
#define WL_MAX_IFS 16
#endif

/* event queue for cfg80211 main event */
struct wl_event_q {
	struct list_head eq_list;
	u32 etype;
	wl_event_msg_t emsg;
	s8 edata[1];
};
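
/*
 * edata[1] is a variable-length tail: wl_ext_event_enq_event() allocates
 * sizeof(struct wl_event_q) + datalen, so the event payload is copied
 * inline, immediately after the header.
 */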

typedef s32(*EXT_EVENT_HANDLER) (struct net_device *dev, void *cb_argu,
	const wl_event_msg_t *e, void *data);

typedef struct event_handler_list {
	struct event_handler_list *next;
	struct net_device *dev;
	uint32 etype;
	EXT_EVENT_HANDLER cb_func;
	void *cb_argu;
	wl_event_prio_t prio;
} event_handler_list_t;
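
/*
 * Handlers live on a singly linked list kept sorted by descending prio
 * (see wl_ext_event_register()); registering with etype == WLC_E_LAST
 * makes the callback a wildcard that fires for every event type.
 */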

typedef struct event_handler_head {
	event_handler_list_t *evt_head;
} event_handler_head_t;

typedef struct wl_event_params {
	dhd_pub_t *pub;
	struct net_device *dev[WL_MAX_IFS];
	struct event_handler_head evt_head;
	struct list_head eq_list; /* used for event queue */
	spinlock_t eq_lock; /* for event queue synchronization */
	struct workqueue_struct *event_workq; /* workqueue for event */
	struct work_struct event_work; /* work item for event */
	struct mutex event_sync;
} wl_event_params_t;
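
/*
 * Overall flow: wl_ext_event_send() (the producer, possibly in atomic
 * context) queues a copy of the firmware event under eq_lock and kicks
 * event_work; wl_ext_event_handler() (ext_eventd workqueue, process
 * context) drains the queue and walks the handler list under event_sync.
 */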

static unsigned long
wl_ext_event_lock_eq(struct wl_event_params *event_params)
{
	unsigned long flags;

	spin_lock_irqsave(&event_params->eq_lock, flags);
	return flags;
}

static void
wl_ext_event_unlock_eq(struct wl_event_params *event_params, unsigned long flags)
{
	spin_unlock_irqrestore(&event_params->eq_lock, flags);
}

static void
wl_ext_event_init_eq_lock(struct wl_event_params *event_params)
{
	spin_lock_init(&event_params->eq_lock);
}

static void
wl_ext_event_init_eq(struct wl_event_params *event_params)
{
	wl_ext_event_init_eq_lock(event_params);
	INIT_LIST_HEAD(&event_params->eq_list);
}

static void
wl_ext_event_flush_eq(struct wl_event_params *event_params)
{
	struct wl_event_q *e;
	unsigned long flags;

	flags = wl_ext_event_lock_eq(event_params);
	while (!list_empty_careful(&event_params->eq_list)) {
		BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
		list_del(&e->eq_list);
		kfree(e);
	}
	wl_ext_event_unlock_eq(event_params, flags);
}

/*
 * retrieve first queued event from head
 */
static struct wl_event_q *
wl_ext_event_deq_event(struct wl_event_params *event_params)
{
	struct wl_event_q *e = NULL;
	unsigned long flags;

	flags = wl_ext_event_lock_eq(event_params);
	if (likely(!list_empty(&event_params->eq_list))) {
		BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
		list_del(&e->eq_list);
	}
	wl_ext_event_unlock_eq(event_params, flags);

	return e;
}

/*
 * push event to tail of the queue
 */
static s32
wl_ext_event_enq_event(struct wl_event_params *event_params, u32 event,
	const wl_event_msg_t *msg, void *data)
{
	struct wl_event_q *e;
	s32 err = 0;
	uint32 evtq_size;
	uint32 data_len;
	unsigned long flags;
	gfp_t aflags;

	data_len = 0;
	if (data)
		data_len = ntoh32(msg->datalen);
	evtq_size = sizeof(struct wl_event_q) + data_len;
	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
	e = kzalloc(evtq_size, aflags);
	if (unlikely(!e)) {
		EVENT_ERROR("wlan", "event alloc failed\n");
		return -ENOMEM;
	}
	e->etype = event;
	memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
	if (data)
		memcpy(e->edata, data, data_len);
	flags = wl_ext_event_lock_eq(event_params);
	list_add_tail(&e->eq_list, &event_params->eq_list);
	wl_ext_event_unlock_eq(event_params, flags);

	return err;
}
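
/*
 * Allocation note: enqueue may run from the RX/event path, so the GFP
 * flags follow in_atomic(): GFP_ATOMIC when sleeping is not allowed,
 * GFP_KERNEL otherwise.
 */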

static void
wl_ext_event_put_event(struct wl_event_q *e)
{
	kfree(e);
}

static void
wl_ext_event_handler(struct work_struct *work_data)
{
	struct wl_event_params *event_params = NULL;
	struct wl_event_q *e;
	struct net_device *dev = NULL;
	struct event_handler_list *evt_node;
	dhd_pub_t *dhd;
	unsigned long flags = 0;

	BCM_SET_CONTAINER_OF(event_params, work_data, struct wl_event_params, event_work);
	DHD_EVENT_WAKE_LOCK(event_params->pub);
	while ((e = wl_ext_event_deq_event(event_params))) {
		/* dev[] is sized WL_MAX_IFS, so bound the index by that */
		if (e->emsg.ifidx >= WL_MAX_IFS) {
			EVENT_ERROR("wlan", "ifidx=%d not in range\n", e->emsg.ifidx);
			goto fail;
		}
		dev = event_params->dev[e->emsg.ifidx];
		if (!dev) {
			EVENT_DBG("wlan", "ifidx=%d dev not ready\n", e->emsg.ifidx);
			goto fail;
		}
		dhd = dhd_get_pub(dev);
		if (e->etype > WLC_E_LAST) {
			EVENT_TRACE(dev->name, "Unknown Event (%d): ignoring\n", e->etype);
			goto fail;
		}
		DHD_GENERAL_LOCK(dhd, flags);
		if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd)) {
			EVENT_ERROR(dev->name, "BUS is DOWN.\n");
			DHD_GENERAL_UNLOCK(dhd, flags);
			goto fail;
		}
		DHD_GENERAL_UNLOCK(dhd, flags);
		EVENT_DBG(dev->name, "event type (%d)\n", e->etype);
		mutex_lock(&event_params->event_sync);
		evt_node = event_params->evt_head.evt_head;
		while (evt_node) {
			/* WLC_E_LAST registered as etype matches any event */
			if (evt_node->dev == dev &&
					(evt_node->etype == e->etype || evt_node->etype == WLC_E_LAST))
				evt_node->cb_func(dev, evt_node->cb_argu, &e->emsg, e->edata);
			evt_node = evt_node->next;
		}
		mutex_unlock(&event_params->event_sync);
fail:
		wl_ext_event_put_event(e);
	}
	DHD_EVENT_WAKE_UNLOCK(event_params->pub);
}
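
/*
 * Handlers therefore run in process context on the dedicated "ext_eventd"
 * workqueue (allocated in wl_ext_event_create_handler()) and may sleep;
 * they must not call back into wl_ext_event_register()/deregister(),
 * which would self-deadlock on event_sync.
 */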

void
wl_ext_event_send(void *params, const wl_event_msg_t *e, void *data)
{
	struct wl_event_params *event_params = params;
	u32 event_type = ntoh32(e->event_type);

	if (event_params == NULL) {
		EVENT_ERROR("wlan", "Stale event %d(%s) ignored\n",
			event_type, bcmevent_get_name(event_type));
		return;
	}

	if (event_params->event_workq == NULL) {
		EVENT_ERROR("wlan", "Event handler is not created %d(%s)\n",
			event_type, bcmevent_get_name(event_type));
		return;
	}

	if (likely(!wl_ext_event_enq_event(event_params, event_type, e, data))) {
		queue_work(event_params->event_workq, &event_params->event_work);
	}
}
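
/*
 * wl_ext_event_send() is the entry point the DHD host-event path is
 * expected to call with dhdp->event_params once a firmware event has
 * been unpacked; the exact call site lives outside this file.
 */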

static s32
wl_ext_event_create_handler(struct wl_event_params *event_params)
{
	int ret = 0;
	EVENT_TRACE("wlan", "Enter\n");

	/* Allocate workqueue for event */
	if (!event_params->event_workq) {
		event_params->event_workq = alloc_workqueue("ext_eventd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	}

	if (!event_params->event_workq) {
		EVENT_ERROR("wlan", "event_workq alloc_workqueue failed\n");
		ret = -ENOMEM;
	} else {
		INIT_WORK(&event_params->event_work, wl_ext_event_handler);
	}
	return ret;
}

static void
wl_ext_event_free(struct wl_event_params *event_params)
{
	struct event_handler_list *node, *cur, **evt_head;

	evt_head = &event_params->evt_head.evt_head;
	node = *evt_head;

	while (node) {
		EVENT_TRACE(node->dev->name, "Free etype=%d\n", node->etype);
		cur = node;
		node = cur->next;
		kfree(cur);
	}
	*evt_head = NULL;
}

static void
wl_ext_event_destroy_handler(struct wl_event_params *event_params)
{
	if (event_params && event_params->event_workq) {
		cancel_work_sync(&event_params->event_work);
		destroy_workqueue(event_params->event_workq);
		event_params->event_workq = NULL;
	}
}

int
wl_ext_event_register(struct net_device *dev, dhd_pub_t *dhd, uint32 event,
	void *cb_func, void *data, wl_event_prio_t prio)
{
	struct wl_event_params *event_params = dhd->event_params;
	struct event_handler_list *node, *leaf, *node_prev, **evt_head;
	int ret = 0;

	if (event_params) {
		mutex_lock(&event_params->event_sync);
		evt_head = &event_params->evt_head.evt_head;
		node = *evt_head;
		while (node) {
			if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
				EVENT_TRACE(dev->name, "skip event %d\n", event);
				mutex_unlock(&event_params->event_sync);
				return 0;
			}
			node = node->next;
		}
		leaf = kmalloc(sizeof(event_handler_list_t), GFP_KERNEL);
		if (!leaf) {
			EVENT_ERROR(dev->name, "Memory alloc failure %d for event %d\n",
				(int)sizeof(event_handler_list_t), event);
			mutex_unlock(&event_params->event_sync);
			return -ENOMEM;
		}
		leaf->next = NULL;
		leaf->dev = dev;
		leaf->etype = event;
		leaf->cb_func = cb_func;
		leaf->cb_argu = data;
		leaf->prio = prio;
		if (*evt_head == NULL) {
			*evt_head = leaf;
		} else {
			/* keep the list sorted by descending prio: insert before
			 * the first node whose prio is <= the new one
			 */
			node = *evt_head;
			node_prev = NULL;
			while (node) {
				if (node->prio <= prio) {
					leaf->next = node;
					if (node_prev)
						node_prev->next = leaf;
					else
						*evt_head = leaf;
					break;
				} else if (node->next == NULL) {
					node->next = leaf;
					break;
				}
				node_prev = node;
				node = node->next;
			}
		}
		EVENT_TRACE(dev->name, "event %d registered\n", event);
		mutex_unlock(&event_params->event_sync);
	} else {
		EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
		ret = -ENODEV;
	}

	return ret;
}
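
/*
 * Usage sketch (names are illustrative; real consumers such as the
 * iapsta/escan modules supply their own callbacks and priorities):
 *
 *	static s32 my_assoc_cb(struct net_device *dev, void *cb_argu,
 *		const wl_event_msg_t *e, void *data)
 *	{
 *		return 0;
 *	}
 *	...
 *	wl_ext_event_register(net, dhdp, WLC_E_ASSOC, my_assoc_cb, ctx, prio);
 *	...
 *	wl_ext_event_deregister(net, dhdp, WLC_E_ASSOC, my_assoc_cb);
 */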

void
wl_ext_event_deregister(struct net_device *dev, dhd_pub_t *dhd,
	uint32 event, void *cb_func)
{
	struct wl_event_params *event_params = dhd->event_params;
	struct event_handler_list *node, *prev, **evt_head;
	int removed_head = 0;

	if (event_params) {
		mutex_lock(&event_params->event_sync);
		evt_head = &event_params->evt_head.evt_head;
		node = *evt_head;
		prev = node;
		while (node) {
			if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
				if (node == *evt_head) {
					removed_head = 1;
					*evt_head = node->next;
				} else {
					removed_head = 0;
					prev->next = node->next;
				}
				EVENT_TRACE(dev->name, "event %d deregistered\n", event);
				kfree(node);
				/* restart from the new head if it was just removed */
				if (removed_head == 1) {
					node = *evt_head;
					prev = node;
				} else {
					node = prev->next;
				}
				continue;
			}
			prev = node;
			node = node->next;
		}
		mutex_unlock(&event_params->event_sync);
	} else {
		EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
	}
}
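
/*
 * Note the walk continues after a match instead of returning: duplicates
 * are normally impossible (wl_ext_event_register() skips them), but this
 * keeps deregistration correct even if the list ever held more than one
 * matching node.
 */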

static s32
wl_ext_event_init_priv(struct wl_event_params *event_params)
{
	s32 err = 0;

	mutex_init(&event_params->event_sync);
	wl_ext_event_init_eq(event_params);
	if (wl_ext_event_create_handler(event_params))
		return -ENOMEM;

	return err;
}

static void
wl_ext_event_deinit_priv(struct wl_event_params *event_params)
{
	wl_ext_event_destroy_handler(event_params);
	wl_ext_event_flush_eq(event_params);
	wl_ext_event_free(event_params);
}

int
wl_ext_event_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx)
{
	struct dhd_pub *dhd = dhd_get_pub(net);
	struct wl_event_params *event_params = dhd->event_params;

	EVENT_TRACE(net->name, "ifidx=%d, bssidx=%d\n", ifidx, bssidx);
	if (event_params && ifidx < WL_MAX_IFS) {
		event_params->dev[ifidx] = net;
	}

	return 0;
}

int
wl_ext_event_dettach_netdev(struct net_device *net, int ifidx)
{
	struct dhd_pub *dhd = dhd_get_pub(net);
	struct wl_event_params *event_params = dhd->event_params;

	EVENT_TRACE(net->name, "ifidx=%d\n", ifidx);
	if (event_params && ifidx < WL_MAX_IFS) {
		event_params->dev[ifidx] = NULL;
	}

	return 0;
}

s32
wl_ext_event_attach(struct net_device *dev, dhd_pub_t *dhdp)
{
	struct wl_event_params *event_params = NULL;
	s32 err = 0;

	event_params = kzalloc(sizeof(wl_event_params_t), GFP_KERNEL);
	if (!event_params) {
		EVENT_ERROR(dev->name, "Failed to allocate memory (%zu)\n",
			sizeof(wl_event_params_t));
		return -ENOMEM;
	}
	dhdp->event_params = event_params;
	event_params->pub = dhdp;

	err = wl_ext_event_init_priv(event_params);
	if (err) {
		EVENT_ERROR(dev->name, "wl_ext_event_init_priv failed (%d)\n", err);
		goto ext_attach_out;
	}

	return err;
ext_attach_out:
	wl_ext_event_dettach(dhdp);
	return err;
}

void
wl_ext_event_dettach(dhd_pub_t *dhdp)
{
	struct wl_event_params *event_params = dhdp->event_params;

	if (event_params) {
		wl_ext_event_deinit_priv(event_params);
		kfree(event_params);
		dhdp->event_params = NULL;
	}
}
#endif /* WL_EXT_IAPSTA || USE_IW */