wifi: update driver to 100.10.545.2 to support STA/AP concurrent [1/2]
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / bcmdhd.100.10.315.x / include / linuxver.h
CommitLineData
d2839953
RC
1/*
2 * Linux-specific abstractions to gain some independence from linux kernel versions.
3 * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
4 *
965f77c4 5 * Copyright (C) 1999-2019, Broadcom.
d2839953
RC
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 *
26 * <<Broadcom-WL-IPTag/Open:>>
27 *
965f77c4 28 * $Id: linuxver.h 806092 2019-02-21 08:19:13Z $
d2839953
RC
29 */
30
31#ifndef _linuxver_h_
32#define _linuxver_h_
33
34#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
35#pragma GCC diagnostic push
36#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
37#pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
38#endif // endif
39
40#include <typedefs.h>
41#include <linux/version.h>
42#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
43#include <linux/config.h>
44#else
45#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
46#include <generated/autoconf.h>
47#else
48#include <linux/autoconf.h>
49#endif // endif
50#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
51
52#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
53#include <linux/kconfig.h>
54#endif // endif
55#include <linux/module.h>
56
57#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
58/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
59#ifdef __UNDEF_NO_VERSION__
60#undef __NO_VERSION__
61#else
62#define __NO_VERSION__
63#endif // endif
64#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */
65
66#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
67#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i")
68#define module_param_string(_name_, _string_, _size_, _perm_) \
69 MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
70#endif // endif
71
72/* linux/malloc.h is deprecated, use linux/slab.h instead. */
73#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
74#include <linux/malloc.h>
75#else
76#include <linux/slab.h>
77#endif // endif
78
79#include <linux/types.h>
80#include <linux/init.h>
81#include <linux/mm.h>
82#include <linux/string.h>
83#include <linux/pci.h>
84#include <linux/interrupt.h>
85#include <linux/kthread.h>
86#include <linux/netdevice.h>
87#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
88#include <linux/semaphore.h>
89#else
90#include <asm/semaphore.h>
91#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
92#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
93#undef IP_TOS
94#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
95#include <asm/io.h>
96
97#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
98#include <linux/workqueue.h>
99#else
100#include <linux/tqueue.h>
101#ifndef work_struct
102#define work_struct tq_struct
103#endif // endif
104#ifndef INIT_WORK
105#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
106#endif // endif
107#ifndef schedule_work
108#define schedule_work(_work) schedule_task((_work))
109#endif // endif
110#ifndef flush_scheduled_work
111#define flush_scheduled_work() flush_scheduled_tasks()
112#endif // endif
113#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */
114
115#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
116#define DAEMONIZE(a) do { \
117 allow_signal(SIGKILL); \
118 allow_signal(SIGTERM); \
119 } while (0)
120#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
121 (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
122#define DAEMONIZE(a) daemonize(a); \
123 allow_signal(SIGKILL); \
124 allow_signal(SIGTERM);
125#else /* Linux 2.4 (w/o preemption patch) */
126#define RAISE_RX_SOFTIRQ() \
127 cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
128#define DAEMONIZE(a) daemonize(); \
129 do { if (a) \
130 strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
131 } while (0);
132#endif /* LINUX_VERSION_CODE */
133
134#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
135#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func)
136#else
137#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work)
138#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
139 (RHEL_MAJOR == 5))
140/* Exclude RHEL 5 */
141typedef void (*work_func_t)(void *work);
142#endif // endif
#endif /* >= 2.6.19 */
144
145#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
146/* Some distributions have their own 2.6.x compatibility layers */
147#ifndef IRQ_NONE
148typedef void irqreturn_t;
149#define IRQ_NONE
150#define IRQ_HANDLED
151#define IRQ_RETVAL(x)
152#endif // endif
153#else
154typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
155#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
156
157#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
158#define IRQF_SHARED SA_SHIRQ
159#endif /* < 2.6.18 */
160
161#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
162#ifdef CONFIG_NET_RADIO
163#define CONFIG_WIRELESS_EXT
164#endif // endif
165#endif /* < 2.6.17 */
166
167#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
168#define MOD_INC_USE_COUNT
169#define MOD_DEC_USE_COUNT
170#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */
171
172#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
173#include <linux/sched.h>
174#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
175
176#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
177#include <linux/sched/rt.h>
178#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
179
180#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
181#include <net/lib80211.h>
182#endif // endif
183#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
184#include <linux/ieee80211.h>
185#else
186#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
187#include <net/ieee80211.h>
188#endif // endif
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
190
191#ifndef __exit
192#define __exit
193#endif // endif
194#ifndef __devexit
195#define __devexit
196#endif // endif
197#ifndef __devinit
198# if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
199# define __devinit __init
200# else
201/* All devices are hotpluggable since linux 3.8.0 */
202# define __devinit
203# endif
204#endif /* !__devinit */
205#ifndef __devinitdata
206#define __devinitdata
207#endif // endif
208#ifndef __devexit_p
209#define __devexit_p(x) x
210#endif // endif
211
212#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
213
214#define pci_get_drvdata(dev) (dev)->sysdata
215#define pci_set_drvdata(dev, value) (dev)->sysdata = (value)
216
217/*
218 * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
219 */
220
/*
 * Minimal stand-in for the 2.4.x struct pci_device_id so new-style
 * driver match tables compile on pre-2.4 kernels.
 */
struct pci_device_id {
	unsigned int vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
	unsigned int subvendor, subdevice;	/* Subsystem ID's or PCI_ANY_ID */
	unsigned int class, class_mask;		/* (class,subclass,prog-if) triplet */
	unsigned long driver_data;		/* Data private to the driver */
};
227
/*
 * Minimal new-style (2.4.x) PCI driver descriptor for pre-2.4 kernels;
 * registered through the pci_register_driver() compat shim declared below.
 */
struct pci_driver {
	struct list_head node;
	char *name;
	const struct pci_device_id *id_table;	/* NULL if wants all devices */
	int (*probe)(struct pci_dev *dev,
		const struct pci_device_id *id);	/* New device inserted */
	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug
						 * capable driver)
						 */
	void (*suspend)(struct pci_dev *dev);	/* Device suspended */
	void (*resume)(struct pci_dev *dev);	/* Device woken up */
};
240
241#define MODULE_DEVICE_TABLE(type, name)
242#define PCI_ANY_ID (~0)
243
244/* compatpci.c */
245#define pci_module_init pci_register_driver
246extern int pci_register_driver(struct pci_driver *drv);
247extern void pci_unregister_driver(struct pci_driver *drv);
248
249#endif /* PCI registration */
250
251#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
252#define pci_module_init pci_register_driver
253#endif // endif
254
255#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
256#ifdef MODULE
257#define module_init(x) int init_module(void) { return x(); }
258#define module_exit(x) void cleanup_module(void) { x(); }
259#else
260#define module_init(x) __initcall(x);
261#define module_exit(x) __exitcall(x);
262#endif // endif
263#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */
264
265#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
266#define WL_USE_NETDEV_OPS
267#else
268#undef WL_USE_NETDEV_OPS
269#endif // endif
270
271#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
272#define WL_CONFIG_RFKILL
273#else
274#undef WL_CONFIG_RFKILL
275#endif // endif
276
277#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
278#define list_for_each(pos, head) \
279 for (pos = (head)->next; pos != (head); pos = pos->next)
280#endif // endif
281
282#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
283#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)])
284#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
285#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
286#endif // endif
287
288#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
289#define pci_enable_device(dev) do { } while (0)
290#endif // endif
291
292#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
293#define net_device device
294#endif // endif
295
296#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
297
298/*
299 * DMA mapping
300 *
301 * See linux/Documentation/DMA-mapping.txt
302 */
303
304#ifndef PCI_DMA_TODEVICE
305#define PCI_DMA_TODEVICE 1
306#define PCI_DMA_FROMDEVICE 2
307#endif // endif
308
309typedef u32 dma_addr_t;
310
311/* Pure 2^n version of get_order */
312static inline int get_order(unsigned long size)
313{
314 int order;
315
316 size = (size-1) >> (PAGE_SHIFT-1);
317 order = -1;
318 do {
319 size >>= 1;
320 order++;
321 } while (size);
322 return order;
323}
324
325static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
326 dma_addr_t *dma_handle)
327{
328 void *ret;
329 int gfp = GFP_ATOMIC | GFP_DMA;
330
331 ret = (void *)__get_free_pages(gfp, get_order(size));
332
333 if (ret != NULL) {
334 memset(ret, 0, size);
335 *dma_handle = virt_to_bus(ret);
336 }
337 return ret;
338}
/* Release a buffer obtained from pci_alloc_consistent(); hwdev and
 * dma_handle are unused in this pre-2.3.42 fallback.
 */
static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
                                       void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
344#define pci_map_single(cookie, address, size, dir) virt_to_bus(address)
345#define pci_unmap_single(cookie, address, size, dir)
346
347#endif /* DMA mapping */
348
965f77c4
RC
349#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
350
351typedef struct timer_list timer_list_compat_t;
352
353#define init_timer_compat(timer_compat, cb, priv) \
354 init_timer(timer_compat); \
355 (timer_compat)->data = (ulong)priv; \
356 (timer_compat)->function = cb
357#define timer_set_private(timer_compat, priv) (timer_compat)->data = (ulong)priv
358#define timer_expires(timer_compat) (timer_compat)->expires
359
360#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
361
362typedef struct timer_list_compat {
363 struct timer_list timer;
364 void *arg;
365 void (*callback)(ulong arg);
366} timer_list_compat_t;
367
368extern void timer_cb_compat(struct timer_list *tl);
369
370#define init_timer_compat(timer_compat, cb, priv) \
371 (timer_compat)->arg = priv; \
372 (timer_compat)->callback = cb; \
373 timer_setup(&(timer_compat)->timer, timer_cb_compat, 0);
374#define timer_set_private(timer_compat, priv) (timer_compat)->arg = priv
375#define timer_expires(timer_compat) (timer_compat)->timer.expires
376
377#define del_timer(t) del_timer(&((t)->timer))
378#define del_timer_sync(t) del_timer_sync(&((t)->timer))
379#define timer_pending(t) timer_pending(&((t)->timer))
380#define add_timer(t) add_timer(&((t)->timer))
381#define mod_timer(t, j) mod_timer(&((t)->timer), j)
382
383#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
384
d2839953
RC
385#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
386
387#define dev_kfree_skb_any(a) dev_kfree_skb(a)
388#define netif_down(dev) do { (dev)->start = 0; } while (0)
389
390/* pcmcia-cs provides its own netdevice compatibility layer */
391#ifndef _COMPAT_NETDEVICE_H
392
393/*
394 * SoftNet
395 *
396 * For pre-softnet kernels we need to tell the upper layer not to
397 * re-enter start_xmit() while we are in there. However softnet
398 * guarantees not to enter while we are in there so there is no need
399 * to do the netif_stop_queue() dance unless the transmit queue really
400 * gets stuck. This should also improve performance according to tests
401 * done by Aman Singla.
402 */
403
404#define dev_kfree_skb_irq(a) dev_kfree_skb(a)
405#define netif_wake_queue(dev) \
406 do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
407#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy)
408
/* Pre-softnet equivalent of netif_start_queue(): clear the transmit-busy
 * and interrupt flags, then mark the device started.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;
}
415
416#define netif_queue_stopped(dev) (dev)->tbusy
417#define netif_running(dev) (dev)->start
418
419#endif /* _COMPAT_NETDEVICE_H */
420
421#define netif_device_attach(dev) netif_start_queue(dev)
422#define netif_device_detach(dev) netif_stop_queue(dev)
423
424/* 2.4.x renamed bottom halves to tasklets */
425#define tasklet_struct tq_struct
/* 2.2-era tasklet_schedule(): queue the task on the immediate task queue,
 * then mark the immediate bottom half so it runs soon.
 */
static inline void tasklet_schedule(struct tasklet_struct *tasklet)
{
	queue_task(tasklet, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
}
431
432static inline void tasklet_init(struct tasklet_struct *tasklet,
433 void (*func)(unsigned long),
434 unsigned long data)
435{
436 tasklet->next = NULL;
437 tasklet->sync = 0;
438 tasklet->routine = (void (*)(void *))func;
439 tasklet->data = (void *)data;
440}
441#define tasklet_kill(tasklet) { do {} while (0); }
442
443/* 2.4.x introduced del_timer_sync() */
444#define del_timer_sync(timer) del_timer(timer)
445
446#else
447
448#define netif_down(dev)
449
450#endif /* SoftNet */
451
452#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
453
454/*
455 * Emit code to initialise a tq_struct's routine and data pointers
456 */
457#define PREPARE_TQUEUE(_tq, _routine, _data) \
458 do { \
459 (_tq)->routine = _routine; \
460 (_tq)->data = _data; \
461 } while (0)
462
463/*
464 * Emit code to initialise all of a tq_struct
465 */
466#define INIT_TQUEUE(_tq, _routine, _data) \
467 do { \
468 INIT_LIST_HEAD(&(_tq)->list); \
469 (_tq)->sync = 0; \
470 PREPARE_TQUEUE((_tq), (_routine), (_data)); \
471 } while (0)
472
473#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */
474
475/* Power management related macro & routines */
476#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
477#define PCI_SAVE_STATE(a, b) pci_save_state(a)
478#define PCI_RESTORE_STATE(a, b) pci_restore_state(a)
479#else
480#define PCI_SAVE_STATE(a, b) pci_save_state(a, b)
481#define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b)
482#endif // endif
483
484#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
485static inline int
486pci_save_state(struct pci_dev *dev, u32 *buffer)
487{
488 int i;
489 if (buffer) {
490 for (i = 0; i < 16; i++)
491 pci_read_config_dword(dev, i * 4, &buffer[i]);
492 }
493 return 0;
494}
495
496static inline int
497pci_restore_state(struct pci_dev *dev, u32 *buffer)
498{
499 int i;
500
501 if (buffer) {
502 for (i = 0; i < 16; i++)
503 pci_write_config_dword(dev, i * 4, buffer[i]);
504 }
505 /*
506 * otherwise, write the context information we know from bootup.
507 * This works around a problem where warm-booting from Windows
508 * combined with a D3(hot)->D0 transition causes PCI config
509 * header data to be forgotten.
510 */
511 else {
512 for (i = 0; i < 6; i ++)
513 pci_write_config_dword(dev,
514 PCI_BASE_ADDRESS_0 + (i * 4),
515 pci_resource_start(dev, i));
516 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
517 }
518 return 0;
519}
520#endif /* PCI power management */
521
522/* Old cp0 access macros deprecated in 2.4.19 */
523#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
524#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
525#endif // endif
526
527/* Module refcount handled internally in 2.6.x */
528#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
529#ifndef SET_MODULE_OWNER
530#define SET_MODULE_OWNER(dev) do {} while (0)
531#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
532#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
533#else
534#define OLD_MOD_INC_USE_COUNT do {} while (0)
535#define OLD_MOD_DEC_USE_COUNT do {} while (0)
536#endif // endif
537#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
538#ifndef SET_MODULE_OWNER
539#define SET_MODULE_OWNER(dev) do {} while (0)
540#endif // endif
541#ifndef MOD_INC_USE_COUNT
542#define MOD_INC_USE_COUNT do {} while (0)
543#endif // endif
544#ifndef MOD_DEC_USE_COUNT
545#define MOD_DEC_USE_COUNT do {} while (0)
546#endif // endif
547#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
548#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
549#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
550
551#ifndef SET_NETDEV_DEV
552#define SET_NETDEV_DEV(net, pdev) do {} while (0)
553#endif // endif
554
555#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
556#ifndef HAVE_FREE_NETDEV
557#define free_netdev(dev) kfree(dev)
558#endif // endif
559#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
560
561#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
562/* struct packet_type redefined in 2.6.x */
563#define af_packet_priv data
564#endif // endif
565
566/* suspend args */
567#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
568#define DRV_SUSPEND_STATE_TYPE pm_message_t
569#else
570#define DRV_SUSPEND_STATE_TYPE uint32
571#endif // endif
572
573#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
574#define CHECKSUM_HW CHECKSUM_PARTIAL
575#endif // endif
576
/*
 * Control block for a driver-managed kernel thread, shared between the
 * creator (PROC_START/PROC_STOP below) and the thread function itself.
 */
typedef struct {
	void *parent;			/* some external entity that the thread supposed to work for */
	char *proc_name;		/* thread name passed to kthread_run() */
	struct task_struct *p_task;	/* task returned by kthread_run() */
	long thr_pid;			/* thread pid; -1 when not running */
	int prio;			/* priority */
	struct semaphore sema;		/* wake-up signal for the thread */
	int terminated;			/* set TRUE by PROC_STOP to request exit */
	struct completion completed;	/* signalled by the thread when it exits */
	int flush_ind;			/* set TRUE by PROC_FLUSH_USING_BINARY_SEMA */
	struct completion flushed;	/* signalled when a flush request finished */
	spinlock_t spinlock;		/* protects up_cnt (binary-semaphore state) */
	int up_cnt;			/* 0/1: whether sema is currently "up" */
} tsk_ctl_t;
591
592/* requires tsk_ctl_t tsk argument, the caller's priv data is passed in owner ptr */
593/* note this macro assumes there may be only one context waiting on thread's completion */
594#ifdef DHD_DEBUG
595#define DBG_THR(x) printk x
596#else
597#define DBG_THR(x)
598#endif // endif
599
600static inline bool binary_sema_down(tsk_ctl_t *tsk)
601{
602 if (down_interruptible(&tsk->sema) == 0) {
603 unsigned long flags = 0;
604 spin_lock_irqsave(&tsk->spinlock, flags);
605 if (tsk->up_cnt == 1)
606 tsk->up_cnt--;
607 else {
608 DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
609 }
610 spin_unlock_irqrestore(&tsk->spinlock, flags);
611 return false;
612 } else
613 return true;
614}
615
616static inline bool binary_sema_up(tsk_ctl_t *tsk)
617{
618 bool sem_up = false;
619 unsigned long flags = 0;
620
621 spin_lock_irqsave(&tsk->spinlock, flags);
622 if (tsk->up_cnt == 0) {
623 tsk->up_cnt++;
624 sem_up = true;
625 } else if (tsk->up_cnt == 1) {
626 /* dhd_sched_dpc: dpc is alread up! */
627 } else
628 DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));
629
630 spin_unlock_irqrestore(&tsk->spinlock, flags);
631
632 if (sem_up)
633 up(&tsk->sema);
634
635 return sem_up;
636}
637
638#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
639#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
640#else
641#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
642#endif // endif
643
644#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
645{ \
646 sema_init(&((tsk_ctl)->sema), 0); \
647 init_completion(&((tsk_ctl)->completed)); \
648 init_completion(&((tsk_ctl)->flushed)); \
649 (tsk_ctl)->parent = owner; \
650 (tsk_ctl)->proc_name = name; \
651 (tsk_ctl)->terminated = FALSE; \
652 (tsk_ctl)->flush_ind = FALSE; \
653 (tsk_ctl)->up_cnt = 0; \
654 (tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char*)name); \
655 if (IS_ERR((tsk_ctl)->p_task)) { \
656 (tsk_ctl)->thr_pid = -1; \
657 DBG_THR(("%s(): thread:%s create failed\n", __FUNCTION__, \
658 (tsk_ctl)->proc_name)); \
659 } else { \
660 (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
661 spin_lock_init(&((tsk_ctl)->spinlock)); \
662 DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
663 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
664 }; \
665}
666
667#define PROC_WAIT_TIMEOUT_MSEC 5000 /* 5 seconds */
668
669#define PROC_STOP(tsk_ctl) \
670{ \
965f77c4 671 uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
d2839953
RC
672 (tsk_ctl)->terminated = TRUE; \
673 smp_wmb(); \
674 up(&((tsk_ctl)->sema)); \
675 DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
676 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
965f77c4 677 timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
d2839953
RC
678 if (timeout == 0) \
679 DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
680 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
681 else \
682 DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
683 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
684 (tsk_ctl)->parent = NULL; \
685 (tsk_ctl)->proc_name = NULL; \
686 (tsk_ctl)->thr_pid = -1; \
687 (tsk_ctl)->up_cnt = 0; \
688}
689
690#define PROC_STOP_USING_BINARY_SEMA(tsk_ctl) \
691{ \
965f77c4 692 uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
d2839953
RC
693 (tsk_ctl)->terminated = TRUE; \
694 smp_wmb(); \
695 binary_sema_up(tsk_ctl); \
696 DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
697 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
965f77c4 698 timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
d2839953
RC
699 if (timeout == 0) \
700 DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
701 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
702 else \
703 DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
704 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
705 (tsk_ctl)->parent = NULL; \
706 (tsk_ctl)->proc_name = NULL; \
707 (tsk_ctl)->thr_pid = -1; \
708}
709
710/*
711* Flush is non-rentrant, so callers must make sure
712* there is no race condition.
713* For safer exit, added wait_for_completion_timeout
714* with 1 sec timeout.
715*/
716#define PROC_FLUSH_USING_BINARY_SEMA(tsk_ctl) \
717{ \
965f77c4 718 uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
d2839953
RC
719 (tsk_ctl)->flush_ind = TRUE; \
720 smp_wmb(); \
721 binary_sema_up(tsk_ctl); \
722 DBG_THR(("%s(): thread:%s:%lx wait for flush\n", __FUNCTION__, \
723 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
965f77c4 724 timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \
d2839953
RC
725 if (timeout == 0) \
726 DBG_THR(("%s(): thread:%s:%lx flush timeout\n", __FUNCTION__, \
727 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
728 else \
729 DBG_THR(("%s(): thread:%s:%lx flushed OK\n", __FUNCTION__, \
730 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
731}
732
733/* ----------------------- */
734
735#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
965f77c4
RC
736#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
737/* send_sig declaration moved */
738#include <linux/sched/signal.h>
739#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) */
740
d2839953
RC
741#define KILL_PROC(nr, sig) \
742{ \
743struct task_struct *tsk; \
744struct pid *pid; \
745pid = find_get_pid((pid_t)nr); \
746tsk = pid_task(pid, PIDTYPE_PID); \
747if (tsk) send_sig(sig, tsk, 1); \
748}
749#else
750#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
751 KERNEL_VERSION(2, 6, 30))
752#define KILL_PROC(pid, sig) \
753{ \
754 struct task_struct *tsk; \
755 tsk = find_task_by_vpid(pid); \
756 if (tsk) send_sig(sig, tsk, 1); \
757}
758#else
759#define KILL_PROC(pid, sig) \
760{ \
761 kill_proc(pid, sig, 1); \
762}
763#endif // endif
764#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
765
766#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
767#include <linux/time.h>
768#include <linux/wait.h>
769#else
770#include <linux/sched.h>
771
772#define __wait_event_interruptible_timeout(wq, condition, ret) \
773do { \
774 wait_queue_t __wait; \
775 init_waitqueue_entry(&__wait, current); \
776 \
777 add_wait_queue(&wq, &__wait); \
778 for (;;) { \
779 set_current_state(TASK_INTERRUPTIBLE); \
780 if (condition) \
781 break; \
782 if (!signal_pending(current)) { \
783 ret = schedule_timeout(ret); \
784 if (!ret) \
785 break; \
786 continue; \
787 } \
788 ret = -ERESTARTSYS; \
789 break; \
790 } \
791 current->state = TASK_RUNNING; \
792 remove_wait_queue(&wq, &__wait); \
793} while (0)
794
795#define wait_event_interruptible_timeout(wq, condition, timeout) \
796({ \
797 long __ret = timeout; \
798 if (!(condition)) \
799 __wait_event_interruptible_timeout(wq, condition, __ret); \
800 __ret; \
801})
802
803#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
804
805/*
806For < 2.6.24, wl creates its own netdev but doesn't
807align the priv area like the genuine alloc_netdev().
808Since netdev_priv() always gives us the aligned address, it will
809not match our unaligned address for < 2.6.24
810*/
811#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
812#define DEV_PRIV(dev) (dev->priv)
813#else
814#define DEV_PRIV(dev) netdev_priv(dev)
815#endif // endif
816
817#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
818#define WL_ISR(i, d, p) wl_isr((i), (d))
819#else
820#define WL_ISR(i, d, p) wl_isr((i), (d), (p))
821#endif /* < 2.6.20 */
822
823#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
824#define netdev_priv(dev) dev->priv
825#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
826
827#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
828#define CAN_SLEEP() ((!in_atomic() && !irqs_disabled()))
829#else
830#define CAN_SLEEP() (FALSE)
831#endif // endif
832
833#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)
834
835#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
836#define RANDOM32 prandom_u32
837#define RANDOM_BYTES prandom_bytes
838#else
839#define RANDOM32 random32
840#define RANDOM_BYTES get_random_bytes
841#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
842
843#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
844#define SRANDOM32(entropy) prandom_seed(entropy)
845#else
846#define SRANDOM32(entropy) srandom32(entropy)
847#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
848
849/*
850 * Overide latest kfifo functions with
851 * older version to work on older kernels
852 */
853#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) && !defined(WL_COMPAT_WIRELESS)
854#define kfifo_in_spinlocked(a, b, c, d) kfifo_put(a, (u8 *)b, c)
855#define kfifo_out_spinlocked(a, b, c, d) kfifo_get(a, (u8 *)b, c)
856#define kfifo_esize(a) 1
857#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
858 (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) && !defined(WL_COMPAT_WIRELESS)
859#define kfifo_in_spinlocked(a, b, c, d) kfifo_in_locked(a, b, c, d)
860#define kfifo_out_spinlocked(a, b, c, d) kfifo_out_locked(a, b, c, d)
861#define kfifo_esize(a) 1
862#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
863
864#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
865#pragma GCC diagnostic pop
866#endif // endif
867
868#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
965f77c4 869#include <linux/fs.h>
d2839953
RC
870static inline struct inode *file_inode(const struct file *f)
871{
872 return f->f_dentry->d_inode;
873}
874#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
875
965f77c4
RC
876#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
877#define vfs_write(fp, buf, len, pos) kernel_write(fp, buf, len, pos)
878#define vfs_read(fp, buf, len, pos) kernel_read(fp, buf, len, pos)
879int kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count);
880#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
881#define kernel_read_compat(file, offset, addr, count) kernel_read(file, offset, addr, count)
882#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
883
d2839953 884#endif /* _linuxver_h_ */