/*
 * Linux OS Independent Layer
 *
 * Copyright (C) 1999-2018, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: linux_osl.c 767848 2018-06-15 09:33:44Z $
 */

#define LINUX_PORT

#include <typedefs.h>
#include <bcmendian.h>
#include <linuxver.h>
#include <bcmdefs.h>

#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
#include <asm/cacheflush.h>
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */

#include <linux/random.h>

#include <osl.h>
#include <bcmutils.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <pcicfg.h>

#ifdef BCM_SECURE_DMA
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <linux/skbuff.h>
#include <stbutils.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#endif /* BCM_SECURE_DMA */

#include <linux/fs.h>

#if defined(STB)
#include <linux/spinlock.h>
extern spinlock_t l2x0_reg_lock;
#endif // endif

#ifdef BCM_OBJECT_TRACE
#include <bcmutils.h>
#endif /* BCM_OBJECT_TRACE */
#include "linux_osl_priv.h"

#define PCI_CFG_RETRY 10

#define DUMPBUFSZ 1024

#ifdef BCM_SECURE_DMA
static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
    bool iscache, bool isdecr);
static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
    sec_mem_elem_t **list);
static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
    void *sec_list_base);
static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
    int direction, struct sec_cma_info *ptr_cma_info, uint offset);
static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
static void osl_sec_dma_init_consistent(osl_t *osh);
static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
    ulong *pap);
static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
#endif /* BCM_SECURE_DMA */

/* PCMCIA attribute space access macros */

uint32 g_assert_type = 0; /* by default, assert failures trigger a kernel panic */

module_param(g_assert_type, int, 0);
#ifdef BCM_SECURE_DMA
#define SECDMA_MODULE_PARAMS 0
#define SECDMA_EXT_FILE 1
unsigned long secdma_addr = 0;
unsigned long secdma_addr2 = 0;
u32 secdma_size = 0;
u32 secdma_size2 = 0;
module_param(secdma_addr, ulong, 0);
module_param(secdma_size, int, 0);
module_param(secdma_addr2, ulong, 0);
module_param(secdma_size2, int, 0);
static int secdma_found = 0;
#endif /* BCM_SECURE_DMA */

#ifdef USE_DMA_LOCK
static void osl_dma_lock(osl_t *osh);
static void osl_dma_unlock(osl_t *osh);
static void osl_dma_lock_init(osl_t *osh);
#define DMA_LOCK(osh)       osl_dma_lock(osh)
#define DMA_UNLOCK(osh)     osl_dma_unlock(osh)
#define DMA_LOCK_INIT(osh)  osl_dma_lock_init(osh);
#else
#define DMA_LOCK(osh)       do { /* noop */ } while(0)
#define DMA_UNLOCK(osh)     do { /* noop */ } while(0)
#define DMA_LOCK_INIT(osh)  do { /* noop */ } while(0)
#endif /* USE_DMA_LOCK */

static int16 linuxbcmerrormap[] =
{   0,              /* 0 */
    -EINVAL,        /* BCME_ERROR */
    -EINVAL,        /* BCME_BADARG */
    -EINVAL,        /* BCME_BADOPTION */
    -EINVAL,        /* BCME_NOTUP */
    -EINVAL,        /* BCME_NOTDOWN */
    -EINVAL,        /* BCME_NOTAP */
    -EINVAL,        /* BCME_NOTSTA */
    -EINVAL,        /* BCME_BADKEYIDX */
    -EINVAL,        /* BCME_RADIOOFF */
    -EINVAL,        /* BCME_NOTBANDLOCKED */
    -EINVAL,        /* BCME_NOCLK */
    -EINVAL,        /* BCME_BADRATESET */
    -EINVAL,        /* BCME_BADBAND */
    -E2BIG,         /* BCME_BUFTOOSHORT */
    -E2BIG,         /* BCME_BUFTOOLONG */
    -EBUSY,         /* BCME_BUSY */
    -EINVAL,        /* BCME_NOTASSOCIATED */
    -EINVAL,        /* BCME_BADSSIDLEN */
    -EINVAL,        /* BCME_OUTOFRANGECHAN */
    -EINVAL,        /* BCME_BADCHAN */
    -EFAULT,        /* BCME_BADADDR */
    -ENOMEM,        /* BCME_NORESOURCE */
    -EOPNOTSUPP,    /* BCME_UNSUPPORTED */
    -EMSGSIZE,      /* BCME_BADLENGTH */
    -EINVAL,        /* BCME_NOTREADY */
    -EPERM,         /* BCME_EPERM */
    -ENOMEM,        /* BCME_NOMEM */
    -EINVAL,        /* BCME_ASSOCIATED */
    -ERANGE,        /* BCME_RANGE */
    -EINVAL,        /* BCME_NOTFOUND */
    -EINVAL,        /* BCME_WME_NOT_ENABLED */
    -EINVAL,        /* BCME_TSPEC_NOTFOUND */
    -EINVAL,        /* BCME_ACM_NOTSUPPORTED */
    -EINVAL,        /* BCME_NOT_WME_ASSOCIATION */
    -EIO,           /* BCME_SDIO_ERROR */
    -ENODEV,        /* BCME_DONGLE_DOWN */
    -EINVAL,        /* BCME_VERSION */
    -EIO,           /* BCME_TXFAIL */
    -EIO,           /* BCME_RXFAIL */
    -ENODEV,        /* BCME_NODEVICE */
    -EINVAL,        /* BCME_NMODE_DISABLED */
    -ENODATA,       /* BCME_NONRESIDENT */
    -EINVAL,        /* BCME_SCANREJECT */
    -EINVAL,        /* BCME_USAGE_ERROR */
    -EIO,           /* BCME_IOCTL_ERROR */
    -EIO,           /* BCME_SERIAL_PORT_ERR */
    -EOPNOTSUPP,    /* BCME_DISABLED, BCME_NOTENABLED */
    -EIO,           /* BCME_DECERR */
    -EIO,           /* BCME_ENCERR */
    -EIO,           /* BCME_MICERR */
    -ERANGE,        /* BCME_REPLAY */
    -EINVAL,        /* BCME_IE_NOTFOUND */
    -EINVAL,        /* BCME_DATA_NOTFOUND */
    -EINVAL,        /* BCME_NOT_GC */
    -EINVAL,        /* BCME_PRS_REQ_FAILED */
    -EINVAL,        /* BCME_NO_P2P_SE */
    -EINVAL,        /* BCME_NOA_PND */
    -EINVAL,        /* BCME_FRAG_Q_FAILED */
    -EINVAL,        /* BCME_GET_AF_FAILED */
    -EINVAL,        /* BCME_MSCH_NOTREADY */
    -EINVAL,        /* BCME_IOV_LAST_CMD */
    -EINVAL,        /* BCME_MINIPMU_CAL_FAIL */
    -EINVAL,        /* BCME_RCAL_FAIL */
    -EINVAL,        /* BCME_LPF_RCCAL_FAIL */
    -EINVAL,        /* BCME_DACBUF_RCCAL_FAIL */
    -EINVAL,        /* BCME_VCOCAL_FAIL */
    -EINVAL,        /* BCME_BANDLOCKED */
    -EINVAL,        /* BCME_DNGL_DEVRESET */

    /* When a new error code is added to bcmutils.h, add an OS-specific
     * error translation here as well
     */
    /* check if BCME_LAST changed since the last time this function was updated */
#if BCME_LAST != -68
#error "You need to add an OS error translation in the linuxbcmerrormap \
for the new error code defined in bcmutils.h"
#endif // endif
};
uint lmtest = FALSE;

#ifdef DHD_MAP_LOGGING
#define DHD_MAP_LOG_SIZE 2048

typedef struct dhd_map_record {
    dma_addr_t addr;
    uint64 time;
} dhd_map_log_t;

dhd_map_log_t *dhd_map_log = NULL, *dhd_unmap_log = NULL;
uint32 map_idx = 0, unmap_idx = 0;

void
osl_dma_map_dump(void)
{
    printk("%s: map_idx=%d unmap_idx=%d current time=%llu\n",
        __FUNCTION__, map_idx, unmap_idx, OSL_SYSUPTIME_US());
    if (dhd_map_log && dhd_unmap_log) {
        printk("%s: dhd_map_log(pa)=%llx size=%d, dma_unmap_log(pa)=%llx size=%d\n",
            __FUNCTION__, (uint64)__virt_to_phys((ulong)dhd_map_log),
            (uint32)(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE),
            (uint64)__virt_to_phys((ulong)dhd_unmap_log),
            (uint32)(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE));
    }
}
#endif /* DHD_MAP_LOGGING */

/* translate bcmerrors into linux errors */
int
osl_error(int bcmerror)
{
    if (bcmerror > 0)
        bcmerror = 0;
    else if (bcmerror < BCME_LAST)
        bcmerror = BCME_ERROR;

    /* Array bounds covered by ASSERT in osl_attach */
    return linuxbcmerrormap[-bcmerror];
}

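/*
 * osl_attach: allocate and initialize an OSL handle for the given bus type;
 * returns NULL on allocation failure. Illustrative call sequence (a sketch,
 * not taken from this file):
 *
 *   osl_t *osh = osl_attach(pdev, PCI_BUS, TRUE);
 *   ...
 *   osl_detach(osh);
 */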
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag)
{
    void **osl_cmn = NULL;
    osl_t *osh;
    gfp_t flags;
#ifdef BCM_SECURE_DMA
    u32 secdma_memsize;
#endif // endif

    flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
    if (!(osh = kmalloc(sizeof(osl_t), flags)))
        return osh;

    ASSERT(osh);

    bzero(osh, sizeof(osl_t));

    if (osl_cmn == NULL || *osl_cmn == NULL) {
        if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
            kfree(osh);
            return NULL;
        }
        bzero(osh->cmn, sizeof(osl_cmn_t));
        if (osl_cmn)
            *osl_cmn = osh->cmn;
        atomic_set(&osh->cmn->malloced, 0);
        osh->cmn->dbgmem_list = NULL;
        spin_lock_init(&(osh->cmn->dbgmem_lock));

        spin_lock_init(&(osh->cmn->pktalloc_lock));

    } else {
        osh->cmn = *osl_cmn;
    }
    atomic_add(1, &osh->cmn->refcount);

    bcm_object_trace_init();

    /* Check that error map has the right number of entries in it */
    ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

    osh->failed = 0;
    osh->pdev = pdev;
    osh->pub.pkttag = pkttag;
    osh->bustype = bustype;
    osh->magic = OS_HANDLE_MAGIC;
#ifdef BCM_SECURE_DMA

    if ((secdma_addr != 0) && (secdma_size != 0)) {
        printk("linux_osl.c: Buffer info passed via module params, using it.\n");
        if (secdma_found == 0) {
            osh->contig_base_alloc = (phys_addr_t)secdma_addr;
            secdma_memsize = secdma_size;
        } else if (secdma_found == 1) {
            osh->contig_base_alloc = (phys_addr_t)secdma_addr2;
            secdma_memsize = secdma_size2;
        } else {
            printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
            kfree(osh);
            return NULL;
        }
        osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
        printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
        printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
            (unsigned int)osh->contig_base_alloc);
        osh->stb_ext_params = SECDMA_MODULE_PARAMS;
    }
    else if (stbpriv_init(osh) == 0) {
        printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");
        if (secdma_found == 0) {
            osh->contig_base_alloc =
                (phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0);
            secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0);
        } else if (secdma_found == 1) {
            osh->contig_base_alloc =
                (phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0);
            secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0);
        } else {
            printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
            kfree(osh);
            return NULL;
        }
        osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
        printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
        printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
            (unsigned int)osh->contig_base_alloc);
        osh->stb_ext_params = SECDMA_EXT_FILE;
    }
    else {
        printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n");
        kfree(osh);
        return NULL;
    }
    secdma_found++;
    osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
        phys_to_page((u32)osh->contig_base_alloc),
        CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);

    if (osh->contig_base_alloc_coherent_va == NULL) {
        if (osh->cmn)
            kfree(osh->cmn);
        kfree(osh);
        return NULL;
    }
    osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va;
    osh->contig_base_alloc_coherent = osh->contig_base_alloc;
    osl_sec_dma_init_consistent(osh);

    osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;

    osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
        phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
    if (osh->contig_base_alloc_va == NULL) {
        osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
        if (osh->cmn)
            kfree(osh->cmn);
        kfree(osh);
        return NULL;
    }
    osh->contig_base_va = osh->contig_base_alloc_va;

#ifdef NOT_YET
    /*
     * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, &osh->sec_list_512);
     * osh->sec_list_base_512 = osh->sec_list_512;
     * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, &osh->sec_list_2048);
     * osh->sec_list_base_2048 = osh->sec_list_2048;
     */
#endif // endif
    if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh,
        CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
        osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
        osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
        if (osh->cmn)
            kfree(osh->cmn);
        kfree(osh);
        return NULL;
    }
    osh->sec_list_base_4096 = osh->sec_list_4096;

#endif /* BCM_SECURE_DMA */

    switch (bustype) {
        case PCI_BUS:
        case SI_BUS:
        case PCMCIA_BUS:
            osh->pub.mmbus = TRUE;
            break;
        case JTAG_BUS:
        case SDIO_BUS:
        case USB_BUS:
        case SPI_BUS:
        case RPC_BUS:
            osh->pub.mmbus = FALSE;
            break;
        default:
            ASSERT(FALSE);
            break;
    }

    DMA_LOCK_INIT(osh);

#ifdef DHD_MAP_LOGGING
    dhd_map_log = kmalloc(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE, flags);
    if (dhd_map_log) {
        memset(dhd_map_log, 0, sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE);
    }
    dhd_unmap_log = kmalloc(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE, flags);
    if (dhd_unmap_log) {
        memset(dhd_unmap_log, 0, sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE);
    }
#endif /* DHD_MAP_LOGGING */

    return osh;
}

void osl_set_bus_handle(osl_t *osh, void *bus_handle)
{
    osh->bus_handle = bus_handle;
}

void* osl_get_bus_handle(osl_t *osh)
{
    return osh->bus_handle;
}

#if defined(BCM_BACKPLANE_TIMEOUT)
void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx)
{
    if (osh) {
        osh->bpt_cb = (bpt_cb_fn)bpt_cb;
        osh->sih = bpt_ctx;
    }
}
#endif /* BCM_BACKPLANE_TIMEOUT */

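/*
 * osl_detach: tear down an OSL handle created by osl_attach(). The shared
 * state (osh->cmn) is reference counted and is freed only when the last
 * handle detaches.
 */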
void
osl_detach(osl_t *osh)
{
    if (osh == NULL)
        return;

#ifdef BCM_SECURE_DMA
    if (osh->stb_ext_params == SECDMA_EXT_FILE)
        stbpriv_exit(osh);
#ifdef NOT_YET
    osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512);
    osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, osh->sec_list_base_2048);
#endif /* NOT_YET */
    osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
    osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
    osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
    secdma_found--;
#endif /* BCM_SECURE_DMA */

    bcm_object_trace_deinit();

#ifdef DHD_MAP_LOGGING
    if (dhd_map_log) {
        kfree(dhd_map_log);
    }
    if (dhd_unmap_log) {
        kfree(dhd_unmap_log);
    }
#endif /* DHD_MAP_LOGGING */

    ASSERT(osh->magic == OS_HANDLE_MAGIC);
    atomic_sub(1, &osh->cmn->refcount);
    if (atomic_read(&osh->cmn->refcount) == 0) {
        kfree(osh->cmn);
    }
    kfree(osh);
}

/* APIs to set/get specific quirks in OSL layer */
void BCMFASTPATH
osl_flag_set(osl_t *osh, uint32 mask)
{
    osh->flags |= mask;
}

void
osl_flag_clr(osl_t *osh, uint32 mask)
{
    osh->flags &= ~mask;
}

#if defined(STB)
inline bool BCMFASTPATH
#else
bool
#endif // endif
osl_is_flag_set(osl_t *osh, uint32 mask)
{
    return (osh->flags & mask);
}

#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
    defined(STB_SOC_WIFI)

inline int BCMFASTPATH
osl_arch_is_coherent(void)
{
    return 0;
}

inline int BCMFASTPATH
osl_acp_war_enab(void)
{
    return 0;
}

inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
{
    if (size > 0)
#ifdef STB_SOC_WIFI
        dma_sync_single_for_device(OSH_NULL, virt_to_phys(va), size, DMA_TX);
#else /* STB_SOC_WIFI */
        dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size,
            DMA_TO_DEVICE);
#endif /* STB_SOC_WIFI */
}

inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{
#ifdef STB_SOC_WIFI
    dma_sync_single_for_cpu(OSH_NULL, virt_to_phys(va), size, DMA_RX);
#else /* STB_SOC_WIFI */
    dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);
#endif /* STB_SOC_WIFI */
}

inline void BCMFASTPATH
osl_prefetch(const void *ptr)
{
#if !defined(STB_SOC_WIFI)
    __asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
#endif // endif
}

#endif // endif

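/*
 * PCI config space accessors. Only 4-byte accesses are supported; reads
 * retry up to PCI_CFG_RETRY times while the bus returns 0xffffffff, and
 * writes to PCI_BAR0_WIN are read back and retried until they stick.
 */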
uint32
osl_pci_read_config(osl_t *osh, uint offset, uint size)
{
    uint val = 0;
    uint retry = PCI_CFG_RETRY;

    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

    /* only 4-byte access supported */
    ASSERT(size == 4);

    do {
        pci_read_config_dword(osh->pdev, offset, &val);
        if (val != 0xffffffff)
            break;
    } while (retry--);

    return (val);
}

void
osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
{
    uint retry = PCI_CFG_RETRY;

    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

    /* only 4-byte access supported */
    ASSERT(size == 4);

    do {
        pci_write_config_dword(osh->pdev, offset, val);
        if (offset != PCI_BAR0_WIN)
            break;
        if (osl_pci_read_config(osh, offset, size) == val)
            break;
    } while (retry--);
}

/* return bus # for the pci device pointed by osh->pdev */
uint
osl_pci_bus(osl_t *osh)
{
    ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
    return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
    return ((struct pci_dev *)osh->pdev)->bus->number;
#endif // endif
}

/* return slot # for the pci device pointed by osh->pdev */
uint
osl_pci_slot(osl_t *osh)
{
    ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
    return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
    return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
#endif // endif
}

/* return domain # for the pci device pointed by osh->pdev */
uint
osl_pcie_domain(osl_t *osh)
{
    ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

    return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
}

/* return bus # for the pci device pointed by osh->pdev */
uint
osl_pcie_bus(osl_t *osh)
{
    ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

    return ((struct pci_dev *)osh->pdev)->bus->number;
}

/* return the pci device pointed by osh->pdev */
struct pci_dev *
osl_pci_device(osl_t *osh)
{
    ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

    return osh->pdev;
}

static void
osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
{
}

void
osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
{
    osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
}

void
osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
{
    osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
}

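/*
 * osl_malloc: allocate `size` bytes and account it in osh->cmn->malloced.
 * With CONFIG_DHD_USE_STATIC_BUF, requests between PAGE_SIZE and
 * STATIC_BUF_SIZE are served from the preallocated static pool first,
 * falling back to kmalloc when the pool is exhausted.
 */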
void *
osl_malloc(osl_t *osh, uint size)
{
    void *addr;
    gfp_t flags;

    /* only ASSERT if osh is defined */
    if (osh)
        ASSERT(osh->magic == OS_HANDLE_MAGIC);
#ifdef CONFIG_DHD_USE_STATIC_BUF
    if (bcm_static_buf)
    {
        unsigned long irq_flags;
        int i = 0;
        if ((size >= PAGE_SIZE) && (size <= STATIC_BUF_SIZE))
        {
            spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags);

            for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
            {
                if (bcm_static_buf->buf_use[i] == 0)
                    break;
            }

            if (i == STATIC_BUF_MAX_NUM)
            {
                spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
                printk("all static buffers in use!\n");
                goto original;
            }

            bcm_static_buf->buf_use[i] = 1;
            spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);

            bzero(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE * i, size);
            if (osh)
                atomic_add(size, &osh->cmn->malloced);

            return ((void *)(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE * i));
        }
    }
original:
#endif /* CONFIG_DHD_USE_STATIC_BUF */

    flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
    if ((addr = kmalloc(size, flags)) == NULL) {
        if (osh)
            osh->failed++;
        return (NULL);
    }
    if (osh && osh->cmn)
        atomic_add(size, &osh->cmn->malloced);

    return (addr);
}

void *
osl_mallocz(osl_t *osh, uint size)
{
    void *ptr;

    ptr = osl_malloc(osh, size);

    if (ptr != NULL) {
        bzero(ptr, size);
    }

    return ptr;
}

void
osl_mfree(osl_t *osh, void *addr, uint size)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
    unsigned long flags;

    if (bcm_static_buf)
    {
        if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
            <= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
        {
            int buf_idx = 0;

            buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr) / STATIC_BUF_SIZE;

            spin_lock_irqsave(&bcm_static_buf->static_lock, flags);
            bcm_static_buf->buf_use[buf_idx] = 0;
            spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags);

            if (osh && osh->cmn) {
                ASSERT(osh->magic == OS_HANDLE_MAGIC);
                atomic_sub(size, &osh->cmn->malloced);
            }
            return;
        }
    }
#endif /* CONFIG_DHD_USE_STATIC_BUF */
    if (osh && osh->cmn) {
        ASSERT(osh->magic == OS_HANDLE_MAGIC);

        ASSERT(size <= osl_malloced(osh));

        atomic_sub(size, &osh->cmn->malloced);
    }
    kfree(addr);
}

void *
osl_vmalloc(osl_t *osh, uint size)
{
    void *addr;

    /* only ASSERT if osh is defined */
    if (osh)
        ASSERT(osh->magic == OS_HANDLE_MAGIC);
    if ((addr = vmalloc(size)) == NULL) {
        if (osh)
            osh->failed++;
        return (NULL);
    }
    if (osh && osh->cmn)
        atomic_add(size, &osh->cmn->malloced);

    return (addr);
}

void *
osl_vmallocz(osl_t *osh, uint size)
{
    void *ptr;

    ptr = osl_vmalloc(osh, size);

    if (ptr != NULL) {
        bzero(ptr, size);
    }

    return ptr;
}

void
osl_vmfree(osl_t *osh, void *addr, uint size)
{
    if (osh && osh->cmn) {
        ASSERT(osh->magic == OS_HANDLE_MAGIC);

        ASSERT(size <= osl_malloced(osh));

        atomic_sub(size, &osh->cmn->malloced);
    }
    vfree(addr);
}

uint
osl_check_memleak(osl_t *osh)
{
    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
    if (atomic_read(&osh->cmn->refcount) == 1)
        return (atomic_read(&osh->cmn->malloced));
    else
        return 0;
}

uint
osl_malloced(osl_t *osh)
{
    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
    return (atomic_read(&osh->cmn->malloced));
}

uint
osl_malloc_failed(osl_t *osh)
{
    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
    return (osh->failed);
}

uint
osl_dma_consistent_align(void)
{
    return (PAGE_SIZE);
}

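/*
 * osl_dma_alloc_consistent: allocate DMA-coherent memory. The size is padded
 * by `align` when DMA_CONSISTENT_ALIGN does not already satisfy the requested
 * alignment; the padded size is returned through *alloced and the physical
 * address through *pap.
 */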
void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
{
    void *va;
    uint16 align = (1 << align_bits);
    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

    if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
        size += align;
    *alloced = size;

#ifndef BCM_SECURE_DMA
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
    defined(STB_SOC_WIFI)
    va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
    if (va)
        *pap = (ulong)__virt_to_phys((ulong)va);
#else
    {
        dma_addr_t pap_lin;
        struct pci_dev *hwdev = osh->pdev;
        gfp_t flags;
#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
        flags = GFP_ATOMIC;
#else
        flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
        va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
#ifdef BCMDMA64OSL
        PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
        PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
#else
        *pap = (dmaaddr_t)pap_lin;
#endif /* BCMDMA64OSL */
    }
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
    va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
#endif /* BCM_SECURE_DMA */
    return va;
}

void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
#ifdef BCMDMA64OSL
    dma_addr_t paddr;
#endif /* BCMDMA64OSL */
    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

#ifndef BCM_SECURE_DMA
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
    defined(STB_SOC_WIFI)
    kfree(va);
#else
#ifdef BCMDMA64OSL
    PHYSADDRTOULONG(pa, paddr);
    pci_free_consistent(osh->pdev, size, va, paddr);
#else
    pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
#endif /* BCMDMA64OSL */
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
    osl_sec_dma_free_consistent(osh, va, size, pa);
#endif /* BCM_SECURE_DMA */
}

void *
osl_virt_to_phys(void *va)
{
    return (void *)(uintptr)virt_to_phys(va);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
#include <asm/cacheflush.h>
void BCMFASTPATH
osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
    return;
}
#endif /* LINUX_VERSION_CODE >= 2.6.36 */

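/*
 * osl_dma_map: map a virtual buffer for streaming DMA and return its bus
 * address. In the default PCI path the address is returned as a dmaaddr_t
 * split into low/high 32-bit halves, zeroed when the mapping fails.
 */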
dmaaddr_t BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
    int dir;
    dmaaddr_t ret_addr;
    dma_addr_t map_addr;
    int ret;

    DMA_LOCK(osh);

    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
    dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

#ifdef STB_SOC_WIFI
#if (__LINUX_ARM_ARCH__ == 8)
    /* need to flush or invalidate the cache here */
    if (dir == DMA_TX) { /* to device */
        osl_cache_flush(va, size);
    } else if (dir == DMA_RX) { /* from device */
        osl_cache_inv(va, size);
    } else { /* both */
        osl_cache_flush(va, size);
        osl_cache_inv(va, size);
    }
    DMA_UNLOCK(osh);
    return virt_to_phys(va);
#else /* (__LINUX_ARM_ARCH__ == 8) */
    map_addr = dma_map_single(osh->pdev, va, size, dir);
    DMA_UNLOCK(osh);
    return map_addr;
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else /* ! STB_SOC_WIFI */
    map_addr = pci_map_single(osh->pdev, va, size, dir);
#endif /* ! STB_SOC_WIFI */

#ifdef DHD_MAP_LOGGING
    if (dhd_map_log) {
        dhd_map_log[map_idx].addr = map_addr;
        dhd_map_log[map_idx].time = OSL_SYSUPTIME_US();
        map_idx++;
        map_idx = map_idx % DHD_MAP_LOG_SIZE;
    }
#endif /* DHD_MAP_LOGGING */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
    ret = pci_dma_mapping_error(osh->pdev, map_addr);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
    ret = pci_dma_mapping_error(map_addr);
#else
    ret = 0;
#endif // endif
    if (ret) {
        printk("%s: Failed to map memory\n", __FUNCTION__);
        PHYSADDRLOSET(ret_addr, 0);
        PHYSADDRHISET(ret_addr, 0);
    } else {
        PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff);
        PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
    }

    DMA_UNLOCK(osh);

    return ret_addr;
}

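/*
 * osl_dma_unmap: release a streaming DMA mapping created by osl_dma_map().
 * Mirrors the map path: the 64-bit build reassembles the dma_addr_t from
 * the dmaaddr_t halves before unmapping.
 */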
void BCMFASTPATH
osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction)
{
    int dir;
#ifdef BCMDMA64OSL
    dma_addr_t paddr;
#endif /* BCMDMA64OSL */

    ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

    DMA_LOCK(osh);

    dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

#ifdef BCMDMA64OSL
    PHYSADDRTOULONG(pa, paddr);
#ifdef DHD_MAP_LOGGING
    if (dhd_unmap_log) {
        dhd_unmap_log[unmap_idx].addr = paddr;
        dhd_unmap_log[unmap_idx].time = OSL_SYSUPTIME_US();
        unmap_idx++;
        unmap_idx = unmap_idx % DHD_MAP_LOG_SIZE;
    }
#endif /* DHD_MAP_LOGGING */

    pci_unmap_single(osh->pdev, paddr, size, dir);
#else /* BCMDMA64OSL */

#ifdef STB_SOC_WIFI
#if (__LINUX_ARM_ARCH__ == 8)
    if (dir == DMA_TX) { /* to device */
        dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
    } else if (dir == DMA_RX) { /* from device */
        dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
    } else { /* both */
        dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
        dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
    }
#else /* (__LINUX_ARM_ARCH__ == 8) */
    dma_unmap_single(osh->pdev, (uintptr)pa, size, dir);
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else /* STB_SOC_WIFI */
#ifdef DHD_MAP_LOGGING
    if (dhd_unmap_log) {
        dhd_unmap_log[unmap_idx].addr = pa;
        dhd_unmap_log[unmap_idx].time = OSL_SYSUPTIME_US();
        unmap_idx++;
        unmap_idx = unmap_idx % DHD_MAP_LOG_SIZE;
    }
#endif /* DHD_MAP_LOGGING */

    pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
#endif /* STB_SOC_WIFI */

#endif /* BCMDMA64OSL */
    DMA_UNLOCK(osh);
}

/* OSL function for CPU relax */
inline void BCMFASTPATH
osl_cpu_relax(void)
{
    cpu_relax();
}

extern void osl_preempt_disable(osl_t *osh)
{
    preempt_disable();
}

extern void osl_preempt_enable(osl_t *osh)
{
    preempt_enable();
}

#if defined(BCMASSERT_LOG)
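/*
 * osl_assert: report an assertion failure according to g_assert_type:
 * 0 panics (the default), 1 and 3 only log, 2 logs and calls BUG().
 */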
void
osl_assert(const char *exp, const char *file, int line)
{
    char tempbuf[256];
    const char *basename;

    basename = strrchr(file, '/');
    /* skip the '/' */
    if (basename)
        basename++;

    if (!basename)
        basename = file;

#ifdef BCMASSERT_LOG
    snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
        exp, basename, line);
#endif /* BCMASSERT_LOG */

    switch (g_assert_type) {
        case 0:
            panic("%s", tempbuf);
            break;
        case 1:
            /* fall through */
        case 3:
            printk("%s", tempbuf);
            break;
        case 2:
            printk("%s", tempbuf);
            BUG();
            break;
        default:
            break;
    }
}
#endif // endif

void
osl_delay(uint usec)
{
    uint d;

    while (usec > 0) {
        d = MIN(usec, 1000);
        udelay(d);
        usec -= d;
    }
}

void
osl_sleep(uint ms)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
    if (ms < 20)
        usleep_range(ms*1000, ms*1000 + 1000);
    else
#endif // endif
        msleep(ms);
}

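/*
 * osl_sysuptime_us: current time in microseconds, derived from
 * do_gettimeofday() (wall-clock based, despite the name).
 */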
uint64
osl_sysuptime_us(void)
{
    struct timeval tv;
    uint64 usec;

    do_gettimeofday(&tv);
    /* tv_usec content is fraction of a second */
    usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec;
    return usec;
}

/*
 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
 */

/*
 * BINOSL selects the slightly slower function-call-based binary compatible osl.
 */

uint32
osl_rand(void)
{
    uint32 rand;

    get_random_bytes(&rand, sizeof(rand));

    return rand;
}

/* Linux Kernel: File Operations: start */
void *
osl_os_open_image(char *filename)
{
    struct file *fp;

    fp = filp_open(filename, O_RDONLY, 0);
    /*
     * 2.6.11 (FC4) supports filp_open() but later revs don't?
     * Alternative:
     * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
     * ???
     */
    if (IS_ERR(fp))
        fp = NULL;

    return fp;
}

int
osl_os_get_image_block(char *buf, int len, void *image)
{
    struct file *fp = (struct file *)image;
    int rdlen;

    if (!image)
        return 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
    rdlen = kernel_read(fp, buf, len, &fp->f_pos);
#else
    rdlen = kernel_read(fp, fp->f_pos, buf, len);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */

    if (rdlen > 0)
        fp->f_pos += rdlen;

    return rdlen;
}

void
osl_os_close_image(void *image)
{
    if (image)
        filp_close((struct file *)image, NULL);
}

int
osl_os_image_size(void *image)
{
    int len = 0, curroffset;

    if (image) {
        /* store the current offset */
        curroffset = generic_file_llseek(image, 0, 1);
        /* goto end of file to get length */
        len = generic_file_llseek(image, 0, 2);
        /* restore back the offset */
        generic_file_llseek(image, curroffset, 0);
    }
    return len;
}

/* Linux Kernel: File Operations: end */

#if (defined(STB) && defined(__arm__))
inline void osl_pcie_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
{
    unsigned long flags = 0;
    int pci_access = 0;
    int acp_war_enab = ACP_WAR_ENAB();

    if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
        pci_access = 1;

    if (pci_access && acp_war_enab)
        spin_lock_irqsave(&l2x0_reg_lock, flags);

    switch (size) {
        case sizeof(uint8):
            *(volatile uint8*)v = readb((volatile uint8*)(addr));
            break;
        case sizeof(uint16):
            *(volatile uint16*)v = readw((volatile uint16*)(addr));
            break;
        case sizeof(uint32):
            *(volatile uint32*)v = readl((volatile uint32*)(addr));
            break;
        case sizeof(uint64):
            *(volatile uint64*)v = *((volatile uint64*)(addr));
            break;
    }

    if (pci_access && acp_war_enab)
        spin_unlock_irqrestore(&l2x0_reg_lock, flags);
}
#endif // endif

#if defined(BCM_BACKPLANE_TIMEOUT)
inline void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
{
    bool poll_timeout = FALSE;
    static int in_si_clear = FALSE;

    switch (size) {
        case sizeof(uint8):
            *(volatile uint8*)v = readb((volatile uint8*)(addr));
            if (*(volatile uint8*)v == 0xff)
                poll_timeout = TRUE;
            break;
        case sizeof(uint16):
            *(volatile uint16*)v = readw((volatile uint16*)(addr));
            if (*(volatile uint16*)v == 0xffff)
                poll_timeout = TRUE;
            break;
        case sizeof(uint32):
            *(volatile uint32*)v = readl((volatile uint32*)(addr));
            if (*(volatile uint32*)v == 0xffffffff)
                poll_timeout = TRUE;
            break;
        case sizeof(uint64):
            *(volatile uint64*)v = *((volatile uint64*)(addr));
            if (*(volatile uint64*)v == 0xffffffffffffffff)
                poll_timeout = TRUE;
            break;
    }

    if (osh && osh->sih && (in_si_clear == FALSE) && poll_timeout && osh->bpt_cb) {
        in_si_clear = TRUE;
        osh->bpt_cb((void *)osh->sih, (void *)addr);
        in_si_clear = FALSE;
    }
}
#endif /* BCM_BACKPLANE_TIMEOUT */

#ifdef BCM_SECURE_DMA
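/*
 * osl_sec_dma_ioremap: map `size` bytes of the physically contiguous secure
 * DMA region starting at `page` into kernel virtual space with vmap(),
 * cached or (on ARMv7) non-cached as requested. When `isdecr` is set, the
 * VA-to-PA delta is recorded for later address translation.
 */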
static void *
osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr)
{
    struct page **map;
    int order, i;
    void *addr = NULL;

    size = PAGE_ALIGN(size);
    order = get_order(size);

    map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);

    if (map == NULL)
        return NULL;

    for (i = 0; i < (size >> PAGE_SHIFT); i++)
        map[i] = page + i;

    if (iscache) {
        addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));
        if (isdecr) {
            osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
        }
    } else {
#if defined(__ARM_ARCH_7A__)
        addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
            pgprot_noncached(__pgprot(PAGE_KERNEL)));
#endif // endif
        if (isdecr) {
            osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
        }
    }

    kfree(map);
    return (void *)addr;
}

static void
osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
{
    vunmap(contig_base_va);
}

static int
osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
{
    int i;
    int ret = BCME_OK;
    sec_mem_elem_t *sec_mem_elem;

    if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) {

        *list = sec_mem_elem;
        bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max));
        for (i = 0; i < max-1; i++) {
            sec_mem_elem->next = (sec_mem_elem + 1);
            sec_mem_elem->size = mbsize;
            sec_mem_elem->pa_cma = osh->contig_base_alloc;
            sec_mem_elem->vac = osh->contig_base_alloc_va;

            sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
            osh->contig_base_alloc += mbsize;
            osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);

            sec_mem_elem = sec_mem_elem + 1;
        }
        sec_mem_elem->next = NULL;
        sec_mem_elem->size = mbsize;
        sec_mem_elem->pa_cma = osh->contig_base_alloc;
        sec_mem_elem->vac = osh->contig_base_alloc_va;

        sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
        osh->contig_base_alloc += mbsize;
        osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);

    } else {
        printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);
        ret = BCME_ERROR;
    }
    return ret;
}

static void
osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
{
    if (sec_list_base)
        kfree(sec_list_base);
}

static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
    struct sec_cma_info *ptr_cma_info, uint offset)
{
    sec_mem_elem_t *sec_mem_elem = NULL;

#ifdef NOT_YET
    if (size <= 512 && osh->sec_list_512) {
        sec_mem_elem = osh->sec_list_512;
        osh->sec_list_512 = sec_mem_elem->next;
    }
    else if (size <= 2048 && osh->sec_list_2048) {
        sec_mem_elem = osh->sec_list_2048;
        osh->sec_list_2048 = sec_mem_elem->next;
    }
    else
#else
    ASSERT(osh->sec_list_4096);
    sec_mem_elem = osh->sec_list_4096;
    osh->sec_list_4096 = sec_mem_elem->next;
#endif /* NOT_YET */

    sec_mem_elem->next = NULL;

    if (ptr_cma_info->sec_alloc_list_tail) {
        ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
        ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
    }
    else {
        /* First allocation: If tail is NULL, sec_alloc_list MUST also be NULL */
        ASSERT(ptr_cma_info->sec_alloc_list == NULL);
        ptr_cma_info->sec_alloc_list = sec_mem_elem;
        ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
    }
    return sec_mem_elem;
}

static void BCMFASTPATH
osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem)
{
    sec_mem_elem->dma_handle = 0x0;
    sec_mem_elem->va = NULL;
#ifdef NOT_YET
    if (sec_mem_elem->size == 512) {
        sec_mem_elem->next = osh->sec_list_512;
        osh->sec_list_512 = sec_mem_elem;
    } else if (sec_mem_elem->size == 2048) {
        sec_mem_elem->next = osh->sec_list_2048;
        osh->sec_list_2048 = sec_mem_elem;
    } else if (sec_mem_elem->size == 4096) {
#endif /* NOT_YET */
    sec_mem_elem->next = osh->sec_list_4096;
    osh->sec_list_4096 = sec_mem_elem;
#ifdef NOT_YET
    }
    else
        printf("%s free failed size=%d\n", __FUNCTION__, sec_mem_elem->size);
#endif /* NOT_YET */
}

static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
{
    sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
    sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;

    if (sec_mem_elem->dma_handle == dma_handle) {

        ptr_cma_info->sec_alloc_list = sec_mem_elem->next;

        if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
            ptr_cma_info->sec_alloc_list_tail = NULL;
            ASSERT(ptr_cma_info->sec_alloc_list == NULL);
        }

        return sec_mem_elem;
    }
    sec_mem_elem = sec_mem_elem->next;

    while (sec_mem_elem != NULL) {

        if (sec_mem_elem->dma_handle == dma_handle) {

            sec_prv_elem->next = sec_mem_elem->next;
            if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail)
                ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;

            return sec_mem_elem;
        }
        sec_prv_elem = sec_mem_elem;
        sec_mem_elem = sec_mem_elem->next;
    }
    return NULL;
}

static sec_mem_elem_t *
osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
    sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;

    if (sec_mem_elem) {

        ptr_cma_info->sec_alloc_list = sec_mem_elem->next;

        if (ptr_cma_info->sec_alloc_list == NULL)
            ptr_cma_info->sec_alloc_list_tail = NULL;

        return sec_mem_elem;

    } else
        return NULL;
}

static void * BCMFASTPATH
osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
    return ptr_cma_info->sec_alloc_list_tail;
}

dma_addr_t BCMFASTPATH
osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p,
    hnddma_seg_map_t *dmah, void *ptr_cma_info)
{
    sec_mem_elem_t *sec_mem_elem;
    struct page *pa_cma_page;
    uint loffset;
    void *vaorig = ((uint8 *)va + size);
    dma_addr_t dma_handle = 0x0;
    /* packet will be the one added with osl_sec_dma_map() just before this call */

    sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);

    if (sec_mem_elem && sec_mem_elem->va == vaorig) {

        pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
        loffset = sec_mem_elem->pa_cma - (sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));

        dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
            (direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));

    } else {
        printf("%s: error orig va not found va = 0x%p \n",
            __FUNCTION__, vaorig);
    }
    return dma_handle;
}

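/*
 * osl_sec_dma_map: bounce-buffer map for secure DMA. A sec_mem_elem_t is
 * taken from the free list; on TX the payload is copied into its CMA backing
 * before mapping, on RX the first word is zeroed so the unmap path can poll
 * for a device-written length.
 */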
dma_addr_t BCMFASTPATH
osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
    hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
{
    sec_mem_elem_t *sec_mem_elem;
    struct page *pa_cma_page;
    void *pa_cma_kmap_va = NULL;
    uint buflen = 0;
    dma_addr_t dma_handle = 0x0;
    uint loffset;
#ifdef NOT_YET
    int *fragva;
    struct sk_buff *skb;
    int i = 0;
#endif /* NOT_YET */

    ASSERT((direction == DMA_RX) || (direction == DMA_TX));
    sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);

    sec_mem_elem->va = va;
    sec_mem_elem->direction = direction;
    pa_cma_page = sec_mem_elem->pa_cma_page;

    loffset = sec_mem_elem->pa_cma - (sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
    /* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
     * pa_cma_kmap_va += loffset;
     */

    pa_cma_kmap_va = sec_mem_elem->vac;
    pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset);
    buflen = size;

    if (direction == DMA_TX) {
        memcpy((uint8*)pa_cma_kmap_va+offset, va, size);

#ifdef NOT_YET
        if (p == NULL) {

            memcpy(pa_cma_kmap_va, va, size);
            /* prhex("Txpkt",pa_cma_kmap_va, size); */
        } else {
            for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
                if (skb_is_nonlinear(skb)) {

                    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                        fragva = kmap_atomic(skb_frag_page(f));
                        pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
                        memcpy((pa_cma_kmap_va),
                            (fragva + f->page_offset), skb_frag_size(f));
                        kunmap_atomic(fragva);
                        buflen += skb_frag_size(f);
                    }
                } else {

                    pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
                    memcpy(pa_cma_kmap_va, skb->data, skb->len);
                    buflen += skb->len;
                }
            }

        }
#endif /* NOT_YET */
        if (dmah) {
            dmah->nsegs = 1;
            dmah->origsize = buflen;
        }
    }
    else
    {
        if ((p != NULL) && (dmah != NULL)) {
            dmah->nsegs = 1;
            dmah->origsize = buflen;
        }
        *(uint32 *)(pa_cma_kmap_va) = 0x0;
    }

    if (direction == DMA_RX) {
        flush_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));
    }
    dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset+offset, buflen,
        (direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
    if (dmah) {
        dmah->segs[0].addr = dma_handle;
        dmah->segs[0].length = buflen;
    }
    sec_mem_elem->dma_handle = dma_handle;
    /* kunmap_atomic(pa_cma_kmap_va-loffset); */
    return dma_handle;
}

dma_addr_t BCMFASTPATH
osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map)
{
    struct page *pa_cma_page;
    phys_addr_t pa_cma;
    dma_addr_t dma_handle = 0x0;
    uint loffset;

    pa_cma = ((uint8 *)va - (uint8 *)osh->contig_delta_va_pa);
    pa_cma_page = phys_to_page(pa_cma);
    loffset = pa_cma - (pa_cma & ~(PAGE_SIZE-1));

    dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
        (direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));

    return dma_handle;
}

void BCMFASTPATH
osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
    void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset)
{
    sec_mem_elem_t *sec_mem_elem;
#ifdef NOT_YET
    struct page *pa_cma_page;
#endif // endif
    void *pa_cma_kmap_va = NULL;
    uint buflen = 0;
    dma_addr_t pa_cma;
    void *va;
    int read_count = 0;
    BCM_REFERENCE(buflen);
    BCM_REFERENCE(read_count);

    sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
    ASSERT(sec_mem_elem);

    va = sec_mem_elem->va;
    va = (uint8 *)va - offset;
    pa_cma = sec_mem_elem->pa_cma;

#ifdef NOT_YET
    pa_cma_page = sec_mem_elem->pa_cma_page;
#endif // endif

    if (direction == DMA_RX) {

        if (p == NULL) {

            /* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
             * pa_cma_kmap_va += loffset;
             */

            pa_cma_kmap_va = sec_mem_elem->vac;

            do {
                invalidate_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));

                buflen = *(uint *)(pa_cma_kmap_va);
                if (buflen)
                    break;

                OSL_DELAY(1);
                read_count++;
            } while (read_count < 200);
            dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE);
            memcpy(va, pa_cma_kmap_va, size);
            /* kunmap_atomic(pa_cma_kmap_va); */
        }
#ifdef NOT_YET
        else {
            buflen = 0;
            for (skb = (struct sk_buff *)p; (buflen < size) &&
                (skb != NULL); skb = skb->next) {
                if (skb_is_nonlinear(skb)) {
                    pa_cma_kmap_va = kmap_atomic(pa_cma_page);
                    for (i = 0; (buflen < size) &&
                        (i < skb_shinfo(skb)->nr_frags); i++) {
                        skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                        cpuaddr = kmap_atomic(skb_frag_page(f));
                        pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
                        memcpy((cpuaddr + f->page_offset),
                            pa_cma_kmap_va, skb_frag_size(f));
                        kunmap_atomic(cpuaddr);
                        buflen += skb_frag_size(f);
                    }
                    kunmap_atomic(pa_cma_kmap_va);
                } else {
                    pa_cma_kmap_va = kmap_atomic(pa_cma_page);
                    pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
                    memcpy(skb->data, pa_cma_kmap_va, skb->len);
                    kunmap_atomic(pa_cma_kmap_va);
                    buflen += skb->len;
                }

            }

        }
#endif /* NOT YET */
    } else {
        dma_unmap_page(OSH_NULL, pa_cma, size+offset, DMA_TO_DEVICE);
    }

    osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
}

void
osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
{
    sec_mem_elem_t *sec_mem_elem;

    sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);

    while (sec_mem_elem != NULL) {

        dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size,
            sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        osl_sec_dma_free_mem_elem(osh, sec_mem_elem);

        sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
    }
}

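/*
 * The coherent part of the secure DMA region is carved into
 * SEC_CMA_COHERENT_MAX fixed-size blocks of SEC_CMA_COHERENT_BLK bytes;
 * osl_sec_dma_alloc_consistent() hands out whole blocks and
 * osl_sec_dma_free_consistent() returns them by VA lookup.
 */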
static void
osl_sec_dma_init_consistent(osl_t *osh)
{
    int i;
    void *temp_va = osh->contig_base_alloc_coherent_va;
    phys_addr_t temp_pa = osh->contig_base_alloc_coherent;

    for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
        osh->sec_cma_coherent[i].avail = TRUE;
        osh->sec_cma_coherent[i].va = temp_va;
        osh->sec_cma_coherent[i].pa = temp_pa;
        temp_va = ((uint8 *)temp_va) + SEC_CMA_COHERENT_BLK;
        temp_pa += SEC_CMA_COHERENT_BLK;
    }
}

static void *
osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap)
{
    void *temp_va = NULL;
    ulong temp_pa = 0;
    int i;

    if (size > SEC_CMA_COHERENT_BLK) {
        printf("%s unsupported size\n", __FUNCTION__);
        return NULL;
    }

    for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
        if (osh->sec_cma_coherent[i].avail == TRUE) {
            temp_va = osh->sec_cma_coherent[i].va;
            temp_pa = osh->sec_cma_coherent[i].pa;
            osh->sec_cma_coherent[i].avail = FALSE;
            break;
        }
    }

    if (i == SEC_CMA_COHERENT_MAX)
        printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
            temp_va, (ulong)temp_pa, size);

    *pap = (unsigned long)temp_pa;
    return temp_va;
}

static void
osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
    int i = 0;

    for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
        if (osh->sec_cma_coherent[i].va == va) {
            osh->sec_cma_coherent[i].avail = TRUE;
            break;
        }
    }
    if (i == SEC_CMA_COHERENT_MAX)
        printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
            va, (ulong)pa, size);
}
#endif /* BCM_SECURE_DMA */

/* timer apis */
/* Note: the timer APIs below are not thread-safe; the caller must protect them with locks */

#ifdef REPORT_FATAL_TIMEOUTS
osl_timer_t *
osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
{
    osl_timer_t *t;
    BCM_REFERENCE(fn);
    if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
        printk(KERN_ERR "osl_timer_init: out of memory, malloced %d bytes\n",
            (int)sizeof(osl_timer_t));
        return (NULL);
    }
    bzero(t, sizeof(osl_timer_t));
    if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) {
        printf("osl_timer_init: malloc failed\n");
        MFREE(NULL, t, sizeof(osl_timer_t));
        return (NULL);
    }
    t->timer->data = (ulong)arg;
    t->timer->function = (linux_timer_fn)fn;
    t->set = TRUE;

    init_timer(t->timer);

    return (t);
}

void
osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
{
    if (t == NULL) {
        printf("%s: Timer handle is NULL\n", __FUNCTION__);
        return;
    }
    ASSERT(!t->set);

    t->set = TRUE;
    if (periodic) {
        printf("Periodic timers are not supported by the Linux timer APIs\n");
    }
    t->timer->expires = jiffies + ms*HZ/1000;

    add_timer(t->timer);

    return;
}

void
osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
{
    if (t == NULL) {
        printf("%s: Timer handle is NULL\n", __FUNCTION__);
        return;
    }
    if (periodic) {
        printf("Periodic timers are not supported by the Linux timer APIs\n");
    }
    t->set = TRUE;
    t->timer->expires = jiffies + ms*HZ/1000;

    mod_timer(t->timer, t->timer->expires);

    return;
}

/*
 * Return TRUE if timer successfully deleted, FALSE if still pending
 */
bool
osl_timer_del(osl_t *osh, osl_timer_t *t)
{
    if (t == NULL) {
        printf("%s: Timer handle is NULL\n", __FUNCTION__);
        return (FALSE);
    }
    if (t->set) {
        t->set = FALSE;
        if (t->timer) {
            del_timer(t->timer);
            MFREE(NULL, t->timer, sizeof(struct timer_list));
        }
        MFREE(NULL, t, sizeof(osl_timer_t));
    }
    return (TRUE);
}
#endif /* REPORT_FATAL_TIMEOUTS */

#ifdef USE_DMA_LOCK
static void
osl_dma_lock(osl_t *osh)
{
    /* XXX: The conditional check avoids a scheduling bug: if spin_lock_bh()
     * is taken while interrupts are already disabled (e.g. under
     * spin_lock_irqsave()), the kernel emits a warning, because
     * spin_lock_bh() must not be used in a context with interrupts
     * disabled. See __local_bh_enable_ip() in kernel/softirq.c for the
     * condition.
     */
    if (likely(in_irq() || irqs_disabled())) {
        spin_lock(&osh->dma_lock);
    } else {
        spin_lock_bh(&osh->dma_lock);
        osh->dma_lock_bh = TRUE;
    }
}

static void
osl_dma_unlock(osl_t *osh)
{
    if (unlikely(osh->dma_lock_bh)) {
        osh->dma_lock_bh = FALSE;
        spin_unlock_bh(&osh->dma_lock);
    } else {
        spin_unlock(&osh->dma_lock);
    }
}

static void
osl_dma_lock_init(osl_t *osh)
{
    spin_lock_init(&osh->dma_lock);
    osh->dma_lock_bh = FALSE;
}
#endif /* USE_DMA_LOCK */