dhd: import wifi and bluetooth firmware
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / bcmdhd.101.10.240.x / dhd_linux_wq.c
/*
 * Broadcom Dongle Host Driver (DHD), Generic work queue framework
 * Generic interface to handle dhd deferred work events
 *
 * Copyright (C) 2020, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id$
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/kfifo.h>

#include <linuxver.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
#include <dhd_linux_wq.h>

/*
 * XXX: always make sure that the size of this structure is a power of 2
 * (2^n), i.e., if any new member has to be added then adjust the padding
 * accordingly
 */
typedef struct dhd_deferred_event {
        u8 event;                       /* holds the event */
        void *event_data;               /* holds event specific data */
        event_handler_t event_handler;
        unsigned long pad;              /* for memory alignment to power of 2 */
} dhd_deferred_event_t;

#define DEFRD_EVT_SIZE (sizeof(dhd_deferred_event_t))
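
/*
 * Illustrative only (not part of the original driver): the power-of-2 size
 * requirement described above could also be enforced at build time, e.g.
 * with the kernel's BUILD_BUG_ON_NOT_POWER_OF_2() helper from <linux/bug.h>:
 *
 *      BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(dhd_deferred_event_t));
 */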
58
59/*
60 * work events may occur simultaneously.
61 * can hold upto 64 low priority events and 16 high priority events
62 */
63#define DHD_PRIO_WORK_FIFO_SIZE (16 * DEFRD_EVT_SIZE)
64#define DHD_WORK_FIFO_SIZE (64 * DEFRD_EVT_SIZE)
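
/*
 * For illustration only (assumes a typical 64-bit build where
 * sizeof(dhd_deferred_event_t) is 32 bytes): the priority fifo is then
 * 16 * 32 = 512 bytes and the work fifo 64 * 32 = 2048 bytes, both already
 * powers of two, so the power-of-2 rounding in dhd_deferred_work_init()
 * leaves these sizes unchanged.
 */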

#define DHD_FIFO_HAS_FREE_SPACE(fifo) \
        ((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE))
#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \
        ((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE))

struct dhd_deferred_wq {
        struct work_struct deferred_work;       /* should be the first member */

        struct kfifo *prio_fifo;
        struct kfifo *work_fifo;
        u8 *prio_fifo_buf;
        u8 *work_fifo_buf;
        spinlock_t work_lock;
        void *dhd_info;                         /* review: is this member really needed? */
        u32 event_skip_mask;
};

static inline struct kfifo*
dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
{
        struct kfifo *fifo;
        gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

        /*
         * Note: 'lock' is not used here; callers serialize fifo access with
         * kfifo_in_spinlocked()/kfifo_out_spinlocked() on work_lock instead.
         */
        fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
        if (!fifo) {
                return NULL;
        }
        kfifo_init(fifo, buf, size);
        return fifo;
}

static inline void
dhd_kfifo_free(struct kfifo *fifo)
{
        kfifo_free(fifo);
        kfree(fifo);
}
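
/*
 * Note (illustrative, not in the original): the fifo buffers passed to
 * dhd_kfifo_init() are kzalloc'd by dhd_deferred_work_init(), and
 * kfifo_free() releases that buffer again (see the comment in
 * dhd_deferred_work_deinit()), so deinit only has to free the struct kfifo
 * wrappers through this helper plus the dhd_deferred_wq itself.
 */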

/* deferred work functions */
static void dhd_deferred_work_handler(struct work_struct *data);

void*
dhd_deferred_work_init(void *dhd_info)
{
        struct dhd_deferred_wq *work = NULL;
        u8 *buf;
        unsigned long fifo_size = 0;
        gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

        if (!dhd_info) {
                DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
                goto return_null;
        }

        work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
                flags);
        if (!work) {
                DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__));
                goto return_null;
        }

        INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);

        /* initialize event fifo */
        spin_lock_init(&work->work_lock);

        /* allocate buffer to hold prio events */
        fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
        fifo_size = is_power_of_2(fifo_size) ? fifo_size :
                roundup_pow_of_two(fifo_size);
        buf = (u8 *)kzalloc(fifo_size, flags);
        if (!buf) {
                DHD_ERROR(("%s: prio work fifo allocation failed\n",
                        __FUNCTION__));
                goto return_null;
        }

        /* Initialize prio event fifo */
        work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
        if (!work->prio_fifo) {
                kfree(buf);
                goto return_null;
        }

        /* allocate buffer to hold work events */
        fifo_size = DHD_WORK_FIFO_SIZE;
        fifo_size = is_power_of_2(fifo_size) ? fifo_size :
                roundup_pow_of_two(fifo_size);
        buf = (u8 *)kzalloc(fifo_size, flags);
        if (!buf) {
                DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__));
                goto return_null;
        }

        /* Initialize event fifo */
        work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
        if (!work->work_fifo) {
                kfree(buf);
                goto return_null;
        }

        work->dhd_info = dhd_info;
        work->event_skip_mask = 0;
        DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__));
        return work;

return_null:
        if (work) {
                dhd_deferred_work_deinit(work);
        }

        return NULL;
}

void
dhd_deferred_work_deinit(void *work)
{
        struct dhd_deferred_wq *deferred_work = work;

        if (!deferred_work) {
                DHD_ERROR(("%s: deferred work has been freed already\n",
                        __FUNCTION__));
                return;
        }

        /* cancel the deferred work handling */
        cancel_work_sync((struct work_struct *)deferred_work);

        /*
         * free work event fifo.
         * kfifo_free frees the locally allocated fifo buffer
         */
        if (deferred_work->prio_fifo) {
                dhd_kfifo_free(deferred_work->prio_fifo);
        }

        if (deferred_work->work_fifo) {
                dhd_kfifo_free(deferred_work->work_fifo);
        }

        kfree(deferred_work);
}

/* select kfifo according to priority */
static inline struct kfifo *
dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq,
        u8 priority)
{
        if (priority == DHD_WQ_WORK_PRIORITY_HIGH) {
                return deferred_wq->prio_fifo;
        } else if (priority == DHD_WQ_WORK_PRIORITY_LOW) {
                return deferred_wq->work_fifo;
        } else {
                return NULL;
        }
}

/*
 * Prepares the event, queues it to the appropriate fifo and
 * schedules the deferred work
 */
int
dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
        event_handler_t event_handler, u8 priority)
{
        struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq;
        struct kfifo *fifo;
        dhd_deferred_event_t deferred_event;
        int bytes_copied = 0;

        if (!deferred_wq) {
                DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
                ASSERT(0);
                return DHD_WQ_STS_UNINITIALIZED;
        }

        if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
                DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__,
                        event));
                return DHD_WQ_STS_UNKNOWN_EVENT;
        }

        if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) {
                DHD_ERROR(("%s: unknown priority, priority=%d\n",
                        __FUNCTION__, priority));
                return DHD_WQ_STS_UNKNOWN_PRIORITY;
        }

        if ((deferred_wq->event_skip_mask & (1 << event))) {
                DHD_ERROR(("%s: skip requested for this event, mask=0x%x\n",
                        __FUNCTION__, deferred_wq->event_skip_mask));
                return DHD_WQ_STS_EVENT_SKIPPED;
        }

        /*
         * The default kfifo element size is 1 byte; kfifo_esize() reports the
         * element size. Older kernels (FC11) do not support a different
         * element size, so for compatibility it is left at the default and
         * whole events are copied as DEFRD_EVT_SIZE bytes.
         */
        ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
        ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);

        deferred_event.event = event;
        deferred_event.event_data = event_data;
        deferred_event.event_handler = event_handler;

        fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority);
        if (DHD_FIFO_HAS_FREE_SPACE(fifo)) {
                bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event,
                        DEFRD_EVT_SIZE, &deferred_wq->work_lock);
        }
        if (bytes_copied != DEFRD_EVT_SIZE) {
                DHD_ERROR(("%s: failed to schedule deferred work, "
                        "priority=%d, bytes_copied=%d\n", __FUNCTION__,
                        priority, bytes_copied));
                return DHD_WQ_STS_SCHED_FAILED;
        }
        schedule_work((struct work_struct *)deferred_wq);
        return DHD_WQ_STS_OK;
}
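
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * defines a handler matching the event_handler_t signature used by this file
 * (handle, event_data, event) and schedules it on the work queue returned by
 * dhd_deferred_work_init(). 'wq', 'my_event_handler' and MY_WQ_WORK_EVENT are
 * hypothetical placeholders; real callers use one of the DHD_WQ_WORK_* event
 * ids from dhd_linux_wq.h.
 *
 *      static void my_event_handler(void *dhd_info, void *event_data, u8 event)
 *      {
 *              (consume event_data here; it may legitimately be NULL)
 *      }
 *
 *      void *wq = dhd_deferred_work_init(dhd_info);
 *      dhd_deferred_schedule_work(wq, event_data, MY_WQ_WORK_EVENT,
 *              my_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
 */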

static bool
dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq,
        dhd_deferred_event_t *event)
{
        int bytes_copied = 0;

        if (!deferred_wq) {
                DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
                return false;
        }

        /*
         * The default kfifo element size is 1 byte; kfifo_esize() reports the
         * element size. Older kernels (FC11) do not support a different
         * element size, so for compatibility it is left at the default.
         */
        ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
        ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);

        /* handle priority work */
        if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) {
                bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo,
                        event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
        }

        /* handle normal work if priority work doesn't have enough data */
        if ((bytes_copied != DEFRD_EVT_SIZE) &&
                DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) {
                bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo,
                        event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
        }

        return (bytes_copied == DEFRD_EVT_SIZE);
}

static inline void
dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event)
{
        if (!work_event) {
                DHD_ERROR(("%s: work_event is null\n", __FUNCTION__));
                return;
        }

        DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__,
                work_event->event));
        DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__,
                work_event->event_data));
        DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__,
                work_event->event_handler));
}

/*
 * Called when work is scheduled
 */
static void
dhd_deferred_work_handler(struct work_struct *work)
{
        struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
        dhd_deferred_event_t work_event;

        if (!deferred_work) {
                DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
                return;
        }

        do {
                if (!dhd_get_scheduled_work(deferred_work, &work_event)) {
                        DHD_TRACE(("%s: no event to handle\n", __FUNCTION__));
                        break;
                }

                if (work_event.event >= DHD_MAX_WQ_EVENTS) {
                        DHD_ERROR(("%s: unknown event\n", __FUNCTION__));
                        dhd_deferred_dump_work_event(&work_event);
                        ASSERT(work_event.event < DHD_MAX_WQ_EVENTS);
                        continue;
                }

                /*
                 * XXX: don't NULL-check 'work_event.event_data': for some
                 * events, such as DHD_WQ_WORK_DHD_LOG_DUMP, the event data is
                 * always NULL even though the rest of the event parameters
                 * are valid
                 */

                if (work_event.event_handler) {
                        work_event.event_handler(deferred_work->dhd_info,
                                work_event.event_data, work_event.event);
                } else {
                        DHD_ERROR(("%s: event handler is null\n",
                                __FUNCTION__));
                        dhd_deferred_dump_work_event(&work_event);
                        ASSERT(work_event.event_handler != NULL);
                }
        } while (1);

        return;
}

void
dhd_deferred_work_set_skip(void *work, u8 event, bool set)
{
        struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)work;

        if (!deferred_wq || !event || (event >= DHD_MAX_WQ_EVENTS)) {
                DHD_ERROR(("%s: invalid arguments, event=%d\n",
                        __FUNCTION__, event));
                return;
        }

        if (set) {
                /* set the skip bit for this event */
                deferred_wq->event_skip_mask |= (1 << event);
        } else {
                /* clear the skip bit for this event */
                deferred_wq->event_skip_mask &= ~(1 << event);
        }
}
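
/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * can temporarily suppress an event, e.g. around teardown, and re-enable it
 * later. 'wq' and MY_WQ_WORK_EVENT are hypothetical placeholders for the
 * deferred work handle and one of the DHD_WQ_WORK_* ids:
 *
 *      dhd_deferred_work_set_skip(wq, MY_WQ_WORK_EVENT, true);
 *      ...
 *      dhd_deferred_work_set_skip(wq, MY_WQ_WORK_EVENT, false);
 *
 * While the skip bit is set, dhd_deferred_schedule_work() returns
 * DHD_WQ_STS_EVENT_SKIPPED for that event instead of queueing it.
 */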