1cba8d1400c43739f840c05927eeca16d101e05f
[GitHub/LineageOS/android_kernel_samsung_universal7580.git] / drivers / net / wireless / bcmdhd_1_77 / dhd_linux_wq.c
1 /*
2 * Broadcom Dongle Host Driver (DHD), Generic work queue framework
3 * Generic interface to handle dhd deferred work events
4 *
5 * Copyright (C) 1999-2017, Broadcom Corporation
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 *
26 * <<Broadcom-WL-IPTag/Open:>>
27 *
28 * $Id: dhd_linux_wq.c 641330 2016-06-02 06:55:00Z $
29 */
30
31 #include <linux/init.h>
32 #include <linux/kernel.h>
33 #include <linux/spinlock.h>
34 #include <linux/fcntl.h>
35 #include <linux/fs.h>
36 #include <linux/ip.h>
37 #include <linux/kfifo.h>
38
39 #include <linuxver.h>
40 #include <osl.h>
41 #include <bcmutils.h>
42 #include <bcmendian.h>
43 #include <bcmdevs.h>
44 #include <dngl_stats.h>
45 #include <dhd.h>
46 #include <dhd_dbg.h>
47 #include <dhd_linux_wq.h>
48
/*
 * One queued work item as stored in a kfifo: the event id, its payload
 * pointer and the handler to invoke when the work runs.
 */
typedef struct dhd_deferred_event {
	u8 event;		/* holds the event */
	void *event_data;	/* holds event specific data */
	event_handler_t event_handler;
	unsigned long pad;	/* for memory alignment to power of 2 */
} dhd_deferred_event_t;

#define DEFRD_EVT_SIZE	(sizeof(dhd_deferred_event_t))

/*
 * work events may occur simultaneously.
 * can hold upto 64 low priority events and 16 high priority events
 */
#define DHD_PRIO_WORK_FIFO_SIZE	(16 * DEFRD_EVT_SIZE)
#define DHD_WORK_FIFO_SIZE	(64 * DEFRD_EVT_SIZE)

/* true when 'fifo' is non-NULL and has room for one more whole event */
#define DHD_FIFO_HAS_FREE_SPACE(fifo) \
	((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE))
/* true when 'fifo' is non-NULL and holds at least one whole event */
#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \
	((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE))
69
/*
 * Deferred work context: one shared work_struct plus two event fifos
 * (high and low priority), both guarded by a single spinlock.
 */
struct dhd_deferred_wq {
	/* must stay first: the code casts between (struct dhd_deferred_wq *)
	 * and (struct work_struct *) in both directions
	 */
	struct work_struct	deferred_work; /* should be the first member */

	struct kfifo			*prio_fifo; /* high priority events */
	struct kfifo			*work_fifo; /* low priority events */
	u8				*prio_fifo_buf;
	u8				*work_fifo_buf;
	spinlock_t			work_lock; /* serializes in/out on both fifos */
	void				*dhd_info; /* review: does it require */
};
80
/*
 * Creates a kfifo over the caller-supplied buffer 'buf' of 'size' bytes,
 * hiding the kernel-version API difference.  Returns the fifo or NULL
 * on allocation failure.  NOTE(review): 'size' is expected to be a
 * power of two (callers round it up) — kfifo requires this; confirm
 * against kfifo_init() semantics for the target kernel.
 */
static inline struct kfifo*
dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
{
	struct kfifo *fifo;
	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
	/* old API: kfifo_init() allocates the struct kfifo itself and
	 * records the spinlock inside the fifo
	 */
	fifo = kfifo_init(buf, size, flags, lock);
#else
	/* new API: caller allocates struct kfifo; 'lock' is unused here
	 * because locking is done per-operation with
	 * kfifo_in/out_spinlocked() at the call sites
	 */
	fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
	if (!fifo) {
		return NULL;
	}
	kfifo_init(fifo, buf, size);
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
	return fifo;
}
98
/*
 * Releases a fifo created by dhd_kfifo_init().  kfifo_free() releases
 * the fifo's data buffer; on kernels newer than 2.6.31 the struct kfifo
 * itself was kzalloc'd by us and must be freed separately.
 */
static inline void
dhd_kfifo_free(struct kfifo *fifo)
{
	kfifo_free(fifo);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
	/* FC11 releases the fifo memory */
	kfree(fifo);
#endif
}
108
109 /* deferred work functions */
110 static void dhd_deferred_work_handler(struct work_struct *data);
111
/*
 * Allocates and initializes the deferred work context: the shared
 * work item, the high-priority event fifo and the low-priority event
 * fifo, all guarded by one spinlock.
 *
 * dhd_info: opaque driver handle handed back to every event handler;
 *           must be non-NULL.
 * Returns the work queue handle on success, NULL on any failure
 * (partially built state is torn down via dhd_deferred_work_deinit).
 */
void*
dhd_deferred_work_init(void *dhd_info)
{
	struct dhd_deferred_wq *work = NULL;
	u8* buf;
	unsigned long fifo_size = 0;
	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

	if (!dhd_info) {
		DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
		goto return_null;
	}

	work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
		flags);
	if (!work) {
		DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__));
		goto return_null;
	}

	/* 'work' starts with the work_struct, so the cast is valid */
	INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);

	/* initialize event fifo */
	spin_lock_init(&work->work_lock);

	/* allocate buffer to hold prio events */
	fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
	/* kfifo needs a power-of-2 buffer size; round up if needed */
	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
		roundup_pow_of_two(fifo_size);
	buf = (u8*)kzalloc(fifo_size, flags);
	if (!buf) {
		DHD_ERROR(("%s: prio work fifo allocation failed\n",
			__FUNCTION__));
		goto return_null;
	}

	/* Initialize prio event fifo; on success the fifo owns 'buf' and
	 * dhd_kfifo_free() will release it
	 */
	work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
	if (!work->prio_fifo) {
		kfree(buf); /* fifo never took ownership */
		goto return_null;
	}

	/* allocate buffer to hold work events */
	fifo_size = DHD_WORK_FIFO_SIZE;
	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
		roundup_pow_of_two(fifo_size);
	buf = (u8*)kzalloc(fifo_size, flags);
	if (!buf) {
		DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__));
		goto return_null;
	}

	/* Initialize event fifo */
	work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
	if (!work->work_fifo) {
		kfree(buf); /* fifo never took ownership */
		goto return_null;
	}

	work->dhd_info = dhd_info;
	DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__));
	return work;

return_null:
	/* deinit handles partially-initialized state (NULL fifos) */
	if (work) {
		dhd_deferred_work_deinit(work);
	}

	return NULL;
}
183
184 void
185 dhd_deferred_work_deinit(void *work)
186 {
187 struct dhd_deferred_wq *deferred_work = work;
188
189
190 if (!deferred_work) {
191 DHD_ERROR(("%s: deferred work has been freed already\n",
192 __FUNCTION__));
193 return;
194 }
195
196 /* cancel the deferred work handling */
197 cancel_work_sync((struct work_struct *)deferred_work);
198
199 /*
200 * free work event fifo.
201 * kfifo_free frees locally allocated fifo buffer
202 */
203 if (deferred_work->prio_fifo) {
204 dhd_kfifo_free(deferred_work->prio_fifo);
205 }
206
207 if (deferred_work->work_fifo) {
208 dhd_kfifo_free(deferred_work->work_fifo);
209 }
210
211 kfree(deferred_work);
212 }
213
214 /* select kfifo according to priority */
215 static inline struct kfifo *
216 dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq,
217 u8 priority)
218 {
219 if (priority == DHD_WQ_WORK_PRIORITY_HIGH) {
220 return deferred_wq->prio_fifo;
221 } else if (priority == DHD_WQ_WORK_PRIORITY_LOW) {
222 return deferred_wq->work_fifo;
223 } else {
224 return NULL;
225 }
226 }
227
228 /*
229 * Prepares event to be queued
230 * Schedules the event
231 */
232 int
233 dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
234 event_handler_t event_handler, u8 priority)
235 {
236 struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq;
237 struct kfifo *fifo;
238 dhd_deferred_event_t deferred_event;
239 int bytes_copied = 0;
240
241 if (!deferred_wq) {
242 DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
243 ASSERT(0);
244 return DHD_WQ_STS_UNINITIALIZED;
245 }
246
247 if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
248 DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__,
249 event));
250 return DHD_WQ_STS_UNKNOWN_EVENT;
251 }
252
253 if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) {
254 DHD_ERROR(("%s: unknown priority, priority=%d\n",
255 __FUNCTION__, priority));
256 return DHD_WQ_STS_UNKNOWN_PRIORITY;
257 }
258
259 /*
260 * default element size is 1, which can be changed
261 * using kfifo_esize(). Older kernel(FC11) doesn't support
262 * changing element size. For compatibility changing
263 * element size is not prefered
264 */
265 ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
266 ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
267
268 deferred_event.event = event;
269 deferred_event.event_data = event_data;
270 deferred_event.event_handler = event_handler;
271
272 fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority);
273 if (DHD_FIFO_HAS_FREE_SPACE(fifo)) {
274 bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event,
275 DEFRD_EVT_SIZE, &deferred_wq->work_lock);
276 }
277 if (bytes_copied != DEFRD_EVT_SIZE) {
278 DHD_ERROR(("%s: failed to schedule deferred work, "
279 "priority=%d, bytes_copied=%d\n", __FUNCTION__,
280 priority, bytes_copied));
281 return DHD_WQ_STS_SCHED_FAILED;
282 }
283 schedule_work((struct work_struct *)deferred_wq);
284 return DHD_WQ_STS_OK;
285 }
286
287 static bool
288 dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq,
289 dhd_deferred_event_t *event)
290 {
291 int bytes_copied = 0;
292
293 if (!deferred_wq) {
294 DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
295 return DHD_WQ_STS_UNINITIALIZED;
296 }
297
298 /*
299 * default element size is 1 byte, which can be changed
300 * using kfifo_esize(). Older kernel(FC11) doesn't support
301 * changing element size. For compatibility changing
302 * element size is not prefered
303 */
304 ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
305 ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
306
307 /* handle priority work */
308 if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) {
309 bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo,
310 event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
311 }
312
313 /* handle normal work if priority work doesn't have enough data */
314 if ((bytes_copied != DEFRD_EVT_SIZE) &&
315 DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) {
316 bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo,
317 event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
318 }
319
320 return (bytes_copied == DEFRD_EVT_SIZE);
321 }
322
323 static inline void
324 dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event)
325 {
326 if (!work_event) {
327 DHD_ERROR(("%s: work_event is null\n", __FUNCTION__));
328 return;
329 }
330
331 DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__,
332 work_event->event));
333 DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__,
334 work_event->event_data));
335 DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__,
336 work_event->event_handler));
337 }
338
339 /*
340 * Called when work is scheduled
341 */
342 static void
343 dhd_deferred_work_handler(struct work_struct *work)
344 {
345 struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
346 dhd_deferred_event_t work_event;
347
348 if (!deferred_work) {
349 DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
350 return;
351 }
352
353 do {
354 if (!dhd_get_scheduled_work(deferred_work, &work_event)) {
355 DHD_TRACE(("%s: no event to handle\n", __FUNCTION__));
356 break;
357 }
358
359 if (work_event.event >= DHD_MAX_WQ_EVENTS) {
360 DHD_ERROR(("%s: unknown event\n", __FUNCTION__));
361 dhd_deferred_dump_work_event(&work_event);
362 ASSERT(work_event.event < DHD_MAX_WQ_EVENTS);
363 continue;
364 }
365
366
367 if (work_event.event_handler) {
368 work_event.event_handler(deferred_work->dhd_info,
369 work_event.event_data, work_event.event);
370 } else {
371 DHD_ERROR(("%s: event handler is null\n",
372 __FUNCTION__));
373 dhd_deferred_dump_work_event(&work_event);
374 ASSERT(work_event.event_handler != NULL);
375 }
376 } while (1);
377
378 return;
379 }