dhd: rename 100.10.315.x to 100.10.545.x
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / bcmdhd.100.10.545.x / linux_osl.c
CommitLineData
d2839953
RC
1/*
2 * Linux OS Independent Layer
3 *
965f77c4 4 * Copyright (C) 1999-2019, Broadcom.
d2839953
RC
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
23 *
24 *
25 * <<Broadcom-WL-IPTag/Open:>>
26 *
965f77c4 27 * $Id: linux_osl.c 815919 2019-04-22 09:06:50Z $
d2839953
RC
28 */
29
30#define LINUX_PORT
31
32#include <typedefs.h>
33#include <bcmendian.h>
34#include <linuxver.h>
35#include <bcmdefs.h>
36
37#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
38#include <asm/cacheflush.h>
39#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
40
41#include <linux/random.h>
42
43#include <osl.h>
44#include <bcmutils.h>
45#include <linux/delay.h>
46#include <linux/vmalloc.h>
47#include <pcicfg.h>
965f77c4
RC
48#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 8, 0))
49#include <asm-generic/pci-dma-compat.h>
50#endif
d2839953
RC
51
52#ifdef BCM_SECURE_DMA
53#include <linux/module.h>
54#include <linux/kernel.h>
55#include <linux/io.h>
56#include <linux/printk.h>
57#include <linux/errno.h>
58#include <linux/mm.h>
59#include <linux/moduleparam.h>
60#include <asm/io.h>
61#include <linux/skbuff.h>
62#include <stbutils.h>
63#include <linux/highmem.h>
64#include <linux/dma-mapping.h>
65#include <asm/memory.h>
66#endif /* BCM_SECURE_DMA */
67
68#include <linux/fs.h>
69
70#if defined(STB)
71#include <linux/spinlock.h>
72extern spinlock_t l2x0_reg_lock;
73#endif // endif
74
75#ifdef BCM_OBJECT_TRACE
76#include <bcmutils.h>
77#endif /* BCM_OBJECT_TRACE */
78#include "linux_osl_priv.h"
79
80#define PCI_CFG_RETRY 10
81
82#define DUMPBUFSZ 1024
83
84#ifdef BCM_SECURE_DMA
85static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
86 bool iscache, bool isdecr);
87static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
88static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
89 sec_mem_elem_t **list);
90static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
91 void *sec_list_base);
92static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
93 int direction, struct sec_cma_info *ptr_cma_info, uint offset);
94static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
95static void osl_sec_dma_init_consistent(osl_t *osh);
96static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
97 ulong *pap);
98static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
99#endif /* BCM_SECURE_DMA */
100
101/* PCMCIA attribute space access macros */
102
103uint32 g_assert_type = 0; /* By Default Kernel Panic */
104
105module_param(g_assert_type, int, 0);
106#ifdef BCM_SECURE_DMA
107#define SECDMA_MODULE_PARAMS 0
108#define SECDMA_EXT_FILE 1
109unsigned long secdma_addr = 0;
110unsigned long secdma_addr2 = 0;
111u32 secdma_size = 0;
112u32 secdma_size2 = 0;
113module_param(secdma_addr, ulong, 0);
114module_param(secdma_size, int, 0);
115module_param(secdma_addr2, ulong, 0);
116module_param(secdma_size2, int, 0);
117static int secdma_found = 0;
118#endif /* BCM_SECURE_DMA */
119
120#ifdef USE_DMA_LOCK
47fa5ad5
RC
121static void osl_dma_lock(osl_t *osh);
122static void osl_dma_unlock(osl_t *osh);
123static void osl_dma_lock_init(osl_t *osh);
965f77c4 124
47fa5ad5
RC
125#define DMA_LOCK(osh) osl_dma_lock(osh)
126#define DMA_UNLOCK(osh) osl_dma_unlock(osh)
127#define DMA_LOCK_INIT(osh) osl_dma_lock_init(osh);
d2839953
RC
128#else
129#define DMA_LOCK(osh) do { /* noop */ } while(0)
130#define DMA_UNLOCK(osh) do { /* noop */ } while(0)
131#define DMA_LOCK_INIT(osh) do { /* noop */ } while(0)
132#endif /* USE_DMA_LOCK */
133
/*
 * Translation table from BCME_* error codes (bcmutils.h) to Linux errno
 * values.  Indexed in osl_error() by the absolute value of the (negative)
 * BCME code; entry 0 is success.  Must stay in sync with bcmutils.h --
 * the #if below trips when BCME_LAST moves without a matching update here.
 */
static int16 linuxbcmerrormap[] =
{	0,			/* 0 */
	-EINVAL,		/* BCME_ERROR */
	-EINVAL,		/* BCME_BADARG */
	-EINVAL,		/* BCME_BADOPTION */
	-EINVAL,		/* BCME_NOTUP */
	-EINVAL,		/* BCME_NOTDOWN */
	-EINVAL,		/* BCME_NOTAP */
	-EINVAL,		/* BCME_NOTSTA */
	-EINVAL,		/* BCME_BADKEYIDX */
	-EINVAL,		/* BCME_RADIOOFF */
	-EINVAL,		/* BCME_NOTBANDLOCKED */
	-EINVAL,		/* BCME_NOCLK */
	-EINVAL,		/* BCME_BADRATESET */
	-EINVAL,		/* BCME_BADBAND */
	-E2BIG,			/* BCME_BUFTOOSHORT */
	-E2BIG,			/* BCME_BUFTOOLONG */
	-EBUSY,			/* BCME_BUSY */
	-EINVAL,		/* BCME_NOTASSOCIATED */
	-EINVAL,		/* BCME_BADSSIDLEN */
	-EINVAL,		/* BCME_OUTOFRANGECHAN */
	-EINVAL,		/* BCME_BADCHAN */
	-EFAULT,		/* BCME_BADADDR */
	-ENOMEM,		/* BCME_NORESOURCE */
	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
	-EMSGSIZE,		/* BCME_BADLENGTH */
	-EINVAL,		/* BCME_NOTREADY */
	-EPERM,			/* BCME_EPERM */
	-ENOMEM,		/* BCME_NOMEM */
	-EINVAL,		/* BCME_ASSOCIATED */
	-ERANGE,		/* BCME_RANGE */
	-EINVAL,		/* BCME_NOTFOUND */
	-EINVAL,		/* BCME_WME_NOT_ENABLED */
	-EINVAL,		/* BCME_TSPEC_NOTFOUND */
	-EINVAL,		/* BCME_ACM_NOTSUPPORTED */
	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
	-EIO,			/* BCME_SDIO_ERROR */
	-ENODEV,		/* BCME_DONGLE_DOWN */
	-EINVAL,		/* BCME_VERSION */
	-EIO,			/* BCME_TXFAIL */
	-EIO,			/* BCME_RXFAIL */
	-ENODEV,		/* BCME_NODEVICE */
	-EINVAL,		/* BCME_NMODE_DISABLED */
	-ENODATA,		/* BCME_NONRESIDENT */
	-EINVAL,		/* BCME_SCANREJECT */
	-EINVAL,		/* BCME_USAGE_ERROR */
	-EIO,			/* BCME_IOCTL_ERROR */
	-EIO,			/* BCME_SERIAL_PORT_ERR */
	-EOPNOTSUPP,		/* BCME_DISABLED, BCME_NOTENABLED */
	-EIO,			/* BCME_DECERR */
	-EIO,			/* BCME_ENCERR */
	-EIO,			/* BCME_MICERR */
	-ERANGE,		/* BCME_REPLAY */
	-EINVAL,		/* BCME_IE_NOTFOUND */
	-EINVAL,		/* BCME_DATA_NOTFOUND */
	-EINVAL,		/* BCME_NOT_GC */
	-EINVAL,		/* BCME_PRS_REQ_FAILED */
	-EINVAL,		/* BCME_NO_P2P_SE */
	-EINVAL,		/* BCME_NOA_PND */
	-EINVAL,		/* BCME_FRAG_Q_FAILED */
	-EINVAL,		/* BCME_GET_AF_FAILED */
	-EINVAL,		/* BCME_MSCH_NOTREADY */
	-EINVAL,		/* BCME_IOV_LAST_CMD */
	-EINVAL,		/* BCME_MINIPMU_CAL_FAIL */
	-EINVAL,		/* BCME_RCAL_FAIL */
	-EINVAL,		/* BCME_LPF_RCCAL_FAIL */
	-EINVAL,		/* BCME_DACBUF_RCCAL_FAIL */
	-EINVAL,		/* BCME_VCOCAL_FAIL */
	-EINVAL,		/* BCME_BANDLOCKED */
	-EINVAL,		/* BCME_DNGL_DEVRESET */

/* When an new error code is added to bcmutils.h, add os
 * specific error translation here as well
 */
/* check if BCME_LAST changed since the last time this function was updated */
#if BCME_LAST != -68
#error "You need to add a OS error translation in the linuxbcmerrormap \
	for new error code defined in bcmutils.h"
#endif // endif
};
214uint lmtest = FALSE;
215
216#ifdef DHD_MAP_LOGGING
217#define DHD_MAP_LOG_SIZE 2048
218
965f77c4
RC
/* One DMA map/unmap record kept for debug logging (DHD_MAP_LOGGING). */
typedef struct dhd_map_item {
	dmaaddr_t pa;		/* DMA address (physical) */
	uint64 ts_nsec;		/* timestamp: nsec */
	uint32 size;		/* mapping size */
	uint8 rsvd[4];		/* reserved for future use */
} dhd_map_item_t;

/* Ring buffer of map records; idx wraps modulo items (see
 * osl_dma_map_logging()).  map[] is a trailing variable-length array
 * sized at allocation time in osl_dma_map_log_init().
 */
typedef struct dhd_map_record {
	uint32 items;		/* number of total items */
	uint32 idx;		/* current index of metadata */
	dhd_map_item_t map[0];	/* metadata storage */
} dhd_map_log_t;
231
/*
 * Dump the current indices, physical locations and sizes of the DMA map
 * and unmap log rings to the kernel log, with a current timestamp.
 * No-op unless both rings were successfully allocated in osl_attach().
 */
void
osl_dma_map_dump(osl_t *osh)
{
	dhd_map_log_t *map_log, *unmap_log;
	uint64 ts_sec, ts_usec;

	map_log = (dhd_map_log_t *)(osh->dhd_map_log);
	unmap_log = (dhd_map_log_t *)(osh->dhd_unmap_log);
	osl_get_localtime(&ts_sec, &ts_usec);

	if (map_log && unmap_log) {
		printk("%s: map_idx=%d unmap_idx=%d "
			"current time=[%5lu.%06lu]\n", __FUNCTION__,
			map_log->idx, unmap_log->idx, (unsigned long)ts_sec,
			(unsigned long)ts_usec);
		/* Physical addresses are printed so the rings can be located
		 * in a post-mortem memory dump.
		 */
		printk("%s: dhd_map_log(pa)=0x%llx size=%d,"
			" dma_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
			(uint64)__virt_to_phys((ulong)(map_log->map)),
			(uint32)(sizeof(dhd_map_item_t) * map_log->items),
			(uint64)__virt_to_phys((ulong)(unmap_log->map)),
			(uint32)(sizeof(dhd_map_item_t) * unmap_log->items));
	}
}
255
256static void *
257osl_dma_map_log_init(uint32 item_len)
d2839953 258{
965f77c4
RC
259 dhd_map_log_t *map_log;
260 gfp_t flags;
261 uint32 alloc_size = (uint32)(sizeof(dhd_map_log_t) +
262 (item_len * sizeof(dhd_map_item_t)));
263
264 flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
265 map_log = (dhd_map_log_t *)kmalloc(alloc_size, flags);
266 if (map_log) {
267 memset(map_log, 0, alloc_size);
268 map_log->items = item_len;
269 map_log->idx = 0;
d2839953 270 }
965f77c4
RC
271
272 return (void *)map_log;
273}
274
275static void
276osl_dma_map_log_deinit(osl_t *osh)
277{
278 if (osh->dhd_map_log) {
279 kfree(osh->dhd_map_log);
280 osh->dhd_map_log = NULL;
281 }
282
283 if (osh->dhd_unmap_log) {
284 kfree(osh->dhd_unmap_log);
285 osh->dhd_unmap_log = NULL;
286 }
287}
288
289static void
290osl_dma_map_logging(osl_t *osh, void *handle, dmaaddr_t pa, uint32 len)
291{
292 dhd_map_log_t *log = (dhd_map_log_t *)handle;
293 uint32 idx;
294
295 if (log == NULL) {
296 printk("%s: log is NULL\n", __FUNCTION__);
297 return;
298 }
299
300 idx = log->idx;
301 log->map[idx].ts_nsec = osl_localtime_ns();
302 log->map[idx].pa = pa;
303 log->map[idx].size = len;
304 log->idx = (idx + 1) % log->items;
d2839953
RC
305}
306#endif /* DHD_MAP_LOGGING */
307
308/* translate bcmerrors into linux errors */
309int
310osl_error(int bcmerror)
311{
312 if (bcmerror > 0)
313 bcmerror = 0;
314 else if (bcmerror < BCME_LAST)
315 bcmerror = BCME_ERROR;
316
317 /* Array bounds covered by ASSERT in osl_attach */
318 return linuxbcmerrormap[-bcmerror];
319}
d2839953
RC
/*
 * Allocate and initialise an OSL (OS abstraction layer) handle.
 *
 * pdev:    bus device handle (e.g. struct pci_dev *), stored for later use
 * bustype: PCI_BUS/SI_BUS/... ; selects memory-mapped vs indirect register I/O
 * pkttag:  whether callers use the packet-tag area of native packets
 *
 * Returns the new handle, or NULL on any allocation/initialisation failure.
 */
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag)
{
	/* NOTE(review): osl_cmn is always NULL here, so a fresh osl_cmn_t is
	 * always allocated below; the else-branch looks like a remnant of a
	 * shared-common-state variant of this API.
	 */
	void **osl_cmn = NULL;
	osl_t *osh;
	gfp_t flags;
#ifdef BCM_SECURE_DMA
	u32 secdma_memsize;
#endif // endif

	/* May be called from atomic context; pick GFP flags accordingly. */
	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
	if (!(osh = kmalloc(sizeof(osl_t), flags)))
		return osh;

	ASSERT(osh);

	bzero(osh, sizeof(osl_t));

	/* Set up the refcounted state shared between clones of this handle. */
	if (osl_cmn == NULL || *osl_cmn == NULL) {
		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
			kfree(osh);
			return NULL;
		}
		bzero(osh->cmn, sizeof(osl_cmn_t));
		if (osl_cmn)
			*osl_cmn = osh->cmn;
		atomic_set(&osh->cmn->malloced, 0);
		osh->cmn->dbgmem_list = NULL;
		spin_lock_init(&(osh->cmn->dbgmem_lock));

		spin_lock_init(&(osh->cmn->pktalloc_lock));

	} else {
		osh->cmn = *osl_cmn;
	}
	atomic_add(1, &osh->cmn->refcount);

	bcm_object_trace_init();

	/* Check that error map has the right number of entries in it */
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

	osh->failed = 0;
	osh->pdev = pdev;
	osh->pub.pkttag = pkttag;
	osh->bustype = bustype;
	osh->magic = OS_HANDLE_MAGIC;
#ifdef BCM_SECURE_DMA
	/* Secure DMA: the contiguous carve-out comes either from module
	 * parameters or from stbpriv.txt; internal allocation is unsupported.
	 * At most two instances (secdma_found 0 and 1) are allowed.
	 */
	if ((secdma_addr != 0) && (secdma_size != 0)) {
		printk("linux_osl.c: Buffer info passed via module params, using it.\n");
		if (secdma_found == 0) {
			osh->contig_base_alloc = (phys_addr_t)secdma_addr;
			secdma_memsize = secdma_size;
		} else if (secdma_found == 1) {
			osh->contig_base_alloc = (phys_addr_t)secdma_addr2;
			secdma_memsize = secdma_size2;
		} else {
			printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
			kfree(osh);
			return NULL;
		}
		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
			(unsigned int)osh->contig_base_alloc);
		osh->stb_ext_params = SECDMA_MODULE_PARAMS;
	}
	else if (stbpriv_init(osh) == 0) {
		printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");
		if (secdma_found == 0) {
			osh->contig_base_alloc =
				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0);
			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0);
		} else if (secdma_found == 1) {
			osh->contig_base_alloc =
				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0);
			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0);
		} else {
			printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
			kfree(osh);
			return NULL;
		}
		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
			(unsigned int)osh->contig_base_alloc);
		osh->stb_ext_params = SECDMA_EXT_FILE;
	}
	else {
		printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n");
		kfree(osh);
		return NULL;
	}
	secdma_found++;
	/* Map the (coherent) descriptor region at the base of the carve-out. */
	osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc),
		CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);

	if (osh->contig_base_alloc_coherent_va == NULL) {
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va;
	osh->contig_base_alloc_coherent = osh->contig_base_alloc;
	osl_sec_dma_init_consistent(osh);

	osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;

	/* Then map the cached data region that follows the descriptors. */
	osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
	if (osh->contig_base_alloc_va == NULL) {
		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->contig_base_va = osh->contig_base_alloc_va;

#ifdef NOT_YET
	/*
	 * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, &osh->sec_list_512);
	 * osh->sec_list_base_512 = osh->sec_list_512;
	 * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, &osh->sec_list_2048);
	 * osh->sec_list_base_2048 = osh->sec_list_2048;
	 */
#endif // endif
	if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh,
		CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
		osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
		if (osh->cmn)
			kfree(osh->cmn);
		kfree(osh);
		return NULL;
	}
	osh->sec_list_base_4096 = osh->sec_list_4096;

#endif /* BCM_SECURE_DMA */

	/* Memory-mapped buses allow direct register access; the others go
	 * through bus-specific read/write routines.
	 */
	switch (bustype) {
		case PCI_BUS:
		case SI_BUS:
		case PCMCIA_BUS:
			osh->pub.mmbus = TRUE;
			break;
		case JTAG_BUS:
		case SDIO_BUS:
		case USB_BUS:
		case SPI_BUS:
		case RPC_BUS:
			osh->pub.mmbus = FALSE;
			break;
		default:
			ASSERT(FALSE);
			break;
	}

	DMA_LOCK_INIT(osh);

#ifdef DHD_MAP_LOGGING
	/* Log-ring allocation failures are non-fatal: DMA map/unmap simply
	 * proceeds without records.
	 */
	osh->dhd_map_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
	if (osh->dhd_map_log == NULL) {
		printk("%s: Failed to alloc dhd_map_log\n", __FUNCTION__);
	}

	osh->dhd_unmap_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
	if (osh->dhd_unmap_log == NULL) {
		printk("%s: Failed to alloc dhd_unmap_log\n", __FUNCTION__);
	}
#endif /* DHD_MAP_LOGGING */

	return osh;
}
497
/* Store the bus driver's private handle in the OSL for later retrieval. */
void osl_set_bus_handle(osl_t *osh, void *bus_handle)
{
	osh->bus_handle = bus_handle;
}
502
/* Return the bus driver handle previously set via osl_set_bus_handle(). */
void* osl_get_bus_handle(osl_t *osh)
{
	return osh->bus_handle;
}
507
508#if defined(BCM_BACKPLANE_TIMEOUT)
/* Register the backplane-timeout callback and its context (the si handle).
 * Silently ignores a NULL osh.
 */
void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx)
{
	if (osh) {
		osh->bpt_cb = (bpt_cb_fn)bpt_cb;
		osh->sih = bpt_ctx;
	}
}
516#endif /* BCM_BACKPLANE_TIMEOUT */
517
518void
519osl_detach(osl_t *osh)
520{
521 if (osh == NULL)
522 return;
523
524#ifdef BCM_SECURE_DMA
525 if (osh->stb_ext_params == SECDMA_EXT_FILE)
526 stbpriv_exit(osh);
527#ifdef NOT_YET
528 osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512);
529 osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, osh->sec_list_base_2048);
530#endif /* NOT_YET */
531 osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
532 osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
533 osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
534 secdma_found--;
535#endif /* BCM_SECURE_DMA */
536
537 bcm_object_trace_deinit();
538
539#ifdef DHD_MAP_LOGGING
965f77c4
RC
540 osl_dma_map_log_deinit(osh->dhd_map_log);
541 osl_dma_map_log_deinit(osh->dhd_unmap_log);
d2839953
RC
542#endif /* DHD_MAP_LOGGING */
543
544 ASSERT(osh->magic == OS_HANDLE_MAGIC);
545 atomic_sub(1, &osh->cmn->refcount);
546 if (atomic_read(&osh->cmn->refcount) == 0) {
547 kfree(osh->cmn);
548 }
549 kfree(osh);
550}
551
/* APIs to set/get specific quirks in OSL layer */

/* OR the given mask into the OSL quirk flags. */
void BCMFASTPATH
osl_flag_set(osl_t *osh, uint32 mask)
{
	osh->flags |= mask;
}
558
/* Clear the given mask from the OSL quirk flags. */
void
osl_flag_clr(osl_t *osh, uint32 mask)
{
	osh->flags &= ~mask;
}
564
/* Test whether any bit of mask is set in the OSL quirk flags.
 * Inlined on the STB fast path.
 */
#if defined(STB)
inline bool BCMFASTPATH
#else
bool
#endif // endif
osl_is_flag_set(osl_t *osh, uint32 mask)
{
	return (osh->flags & mask);
}
574
575#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
576 defined(STB_SOC_WIFI)
577
/* Report whether the architecture keeps DMA buffers cache-coherent.
 * Always 0 on these ARM/STB configurations: explicit cache maintenance
 * (osl_cache_flush/osl_cache_inv) is required.
 */
inline int BCMFASTPATH
osl_arch_is_coherent(void)
{
	return 0;
}
583
/* ACP (Accelerator Coherency Port) workaround: disabled on this platform. */
inline int BCMFASTPATH
osl_acp_war_enab(void)
{
	return 0;
}
589
/* Write back (clean) the CPU cache for [va, va+size) before a
 * device read, by syncing the region for the device.
 */
inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
{

	if (size > 0)
#ifdef STB_SOC_WIFI
		dma_sync_single_for_device(OSH_NULL, virt_to_phys(va), size, DMA_TX);
#else /* STB_SOC_WIFI */
		dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size,
			DMA_TO_DEVICE);
#endif /* STB_SOC_WIFI */
}
602
/* Invalidate the CPU cache for [va, va+size) after a device write,
 * by syncing the region back to the CPU.  Note: unlike osl_cache_flush(),
 * size is not checked for zero before the sync call.
 */
inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{

#ifdef STB_SOC_WIFI
	dma_sync_single_for_cpu(OSH_NULL, virt_to_phys(va), size, DMA_RX);
#else /* STB_SOC_WIFI */
	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);
#endif /* STB_SOC_WIFI */
}
613
/* Hint the CPU to preload the cache line at ptr (ARM PLD instruction);
 * a no-op on STB_SOC_WIFI builds.
 */
inline void BCMFASTPATH
osl_prefetch(const void *ptr)
{
#if !defined(STB_SOC_WIFI)
	__asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
#endif // endif
}
621
622#endif // endif
623
624uint32
625osl_pci_read_config(osl_t *osh, uint offset, uint size)
626{
627 uint val = 0;
628 uint retry = PCI_CFG_RETRY;
629
630 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
631
632 /* only 4byte access supported */
633 ASSERT(size == 4);
634
635 do {
636 pci_read_config_dword(osh->pdev, offset, &val);
637 if (val != 0xffffffff)
638 break;
639 } while (retry--);
640
641 return (val);
642}
643
644void
645osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
646{
647 uint retry = PCI_CFG_RETRY;
648
649 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
650
651 /* only 4byte access supported */
652 ASSERT(size == 4);
653
654 do {
655 pci_write_config_dword(osh->pdev, offset, val);
656 if (offset != PCI_BAR0_WIN)
657 break;
658 if (osl_pci_read_config(osh, offset, size) == val)
659 break;
660 } while (retry--);
661
662}
663
/* return bus # for the pci device pointed by osh->pdev */
uint
osl_pci_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	/* NOTE(review): on ARMv7 this returns the PCI *domain* number rather
	 * than the bus number -- presumably matching that platform's
	 * enumeration scheme; confirm against callers before changing.
	 */
#if defined(__ARM_ARCH_7A__)
	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
	return ((struct pci_dev *)osh->pdev)->bus->number;
#endif // endif
}
676
/* return slot # for the pci device pointed by osh->pdev */
uint
osl_pci_slot(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	/* ARMv7 platforms report the slot 1-based; others 0-based. */
#if defined(__ARM_ARCH_7A__)
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
#endif // endif
}
689
/* return domain # for the pci device pointed by osh->pdev */
uint
osl_pcie_domain(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
}
698
/* return bus # for the pci device pointed by osh->pdev */
uint
osl_pcie_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return ((struct pci_dev *)osh->pdev)->bus->number;
}
707
/* return the pci device pointed by osh->pdev */
struct pci_dev *
osl_pci_device(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return osh->pdev;
}
716
/* PCMCIA attribute-space access: intentionally a no-op stub on Linux. */
static void
osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
{
}
721
/* Read PCMCIA attribute space (no-op via the osl_pcmcia_attr stub). */
void
osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
}
727
/* Write PCMCIA attribute space (no-op via the osl_pcmcia_attr stub). */
void
osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
}
733
/*
 * Allocate size bytes for the driver and account it in osh->cmn->malloced.
 * With CONFIG_DHD_USE_STATIC_BUF, requests between PAGE_SIZE and
 * STATIC_BUF_SIZE are first served from a pre-allocated static pool,
 * falling back to kmalloc when the pool is exhausted.  Returns NULL on
 * failure (and increments osh->failed).
 */
void *
osl_malloc(osl_t *osh, uint size)
{
	void *addr;
	gfp_t flags;

	/* only ASSERT if osh is defined */
	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf)
	{
		unsigned long irq_flags;
		int i = 0;
		if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
		{
			spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags);

			/* find the first free slot in the static pool */
			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
			{
				if (bcm_static_buf->buf_use[i] == 0)
					break;
			}

			if (i == STATIC_BUF_MAX_NUM)
			{
				spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
				printk("all static buff in use!\n");
				goto original;
			}

			bcm_static_buf->buf_use[i] = 1;
			spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);

			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
			if (osh)
				atomic_add(size, &osh->cmn->malloced);

			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
		}
	}
original:
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
	if ((addr = kmalloc(size, flags)) == NULL) {
		if (osh)
			osh->failed++;
		return (NULL);
	}
	if (osh && osh->cmn)
		atomic_add(size, &osh->cmn->malloced);

	return (addr);
}
789
790void *
791osl_mallocz(osl_t *osh, uint size)
792{
793 void *ptr;
794
795 ptr = osl_malloc(osh, size);
796
797 if (ptr != NULL) {
798 bzero(ptr, size);
799 }
800
801 return ptr;
802}
803
/*
 * Free memory obtained from osl_malloc()/osl_mallocz() and subtract it
 * from osh->cmn->malloced.  With CONFIG_DHD_USE_STATIC_BUF, addresses
 * inside the static pool are returned to the pool instead of kfree'd.
 */
void
osl_mfree(osl_t *osh, void *addr, uint size)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	unsigned long flags;

	if (bcm_static_buf)
	{
		/* Does addr fall inside the static pool's address range? */
		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
		{
			int buf_idx = 0;

			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;

			spin_lock_irqsave(&bcm_static_buf->static_lock, flags);
			bcm_static_buf->buf_use[buf_idx] = 0;
			spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags);

			if (osh && osh->cmn) {
				ASSERT(osh->magic == OS_HANDLE_MAGIC);
				atomic_sub(size, &osh->cmn->malloced);
			}
			return;
		}
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

		ASSERT(size <= osl_malloced(osh));

		atomic_sub(size, &osh->cmn->malloced);
	}
	kfree(addr);
}
840
841void *
842osl_vmalloc(osl_t *osh, uint size)
843{
844 void *addr;
845
846 /* only ASSERT if osh is defined */
847 if (osh)
848 ASSERT(osh->magic == OS_HANDLE_MAGIC);
849 if ((addr = vmalloc(size)) == NULL) {
850 if (osh)
851 osh->failed++;
852 return (NULL);
853 }
854 if (osh && osh->cmn)
855 atomic_add(size, &osh->cmn->malloced);
856
857 return (addr);
858}
859
860void *
861osl_vmallocz(osl_t *osh, uint size)
862{
863 void *ptr;
864
865 ptr = osl_vmalloc(osh, size);
866
867 if (ptr != NULL) {
868 bzero(ptr, size);
869 }
870
871 return ptr;
872}
873
874void
875osl_vmfree(osl_t *osh, void *addr, uint size)
876{
877 if (osh && osh->cmn) {
878 ASSERT(osh->magic == OS_HANDLE_MAGIC);
879
880 ASSERT(size <= osl_malloced(osh));
881
882 atomic_sub(size, &osh->cmn->malloced);
883 }
884 vfree(addr);
885}
886
887uint
888osl_check_memleak(osl_t *osh)
889{
890 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
891 if (atomic_read(&osh->cmn->refcount) == 1)
892 return (atomic_read(&osh->cmn->malloced));
893 else
894 return 0;
895}
896
/* Return the number of bytes currently allocated through this OSL. */
uint
osl_malloced(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (atomic_read(&osh->cmn->malloced));
}
903
/* Return how many osl_malloc()/osl_vmalloc() calls have failed so far. */
uint
osl_malloc_failed(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (osh->failed);
}
910
/* Alignment guaranteed by osl_dma_alloc_consistent(): one page. */
uint
osl_dma_consistent_align(void)
{
	return (PAGE_SIZE);
}
916
/*
 * Allocate DMA-coherent memory.
 *
 * size:       requested bytes; padded by align when DMA_CONSISTENT_ALIGN
 *             does not already satisfy the requested alignment
 * align_bits: requested alignment as a power of two
 * alloced:    out - the actual (possibly padded) size allocated
 * pap:        out - the bus/physical address of the allocation
 *
 * Returns the kernel virtual address, or NULL on failure.  On ARMv7
 * without coherent-ring support (and on STB SoC WiFi), plain zeroed
 * kmalloc memory is used with explicit cache maintenance elsewhere.
 */
void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
{
	void *va;
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;
	*alloced = size;

#ifndef	BCM_SECURE_DMA
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
	defined(STB_SOC_WIFI)
	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
	if (va)
		*pap = (ulong)__virt_to_phys((ulong)va);
#else
	{
		dma_addr_t pap_lin;
		struct pci_dev *hwdev = osh->pdev;
		gfp_t flags;
#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
		flags = GFP_ATOMIC;
#else
		flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
		va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
		/* Split the 64-bit dma_addr_t into the hi/lo dmaaddr_t pair. */
#ifdef BCMDMA64OSL
		PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
		PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
#else
		*pap = (dmaaddr_t)pap_lin;
#endif /* BCMDMA64OSL */
	}
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
#endif /* BCM_SECURE_DMA */
	return va;
}
958
/*
 * Free memory obtained from osl_dma_alloc_consistent().  The release
 * path must mirror the allocation path: kfree on ARMv7/STB builds,
 * pci_free_consistent otherwise, and the secure-DMA pool under
 * BCM_SECURE_DMA.
 */
void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
#ifdef BCMDMA64OSL
	dma_addr_t paddr;
#endif /* BCMDMA64OSL */
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

#ifndef BCM_SECURE_DMA
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
	defined(STB_SOC_WIFI)
	kfree(va);
#else
#ifdef BCMDMA64OSL
	/* Recombine the hi/lo dmaaddr_t pair into a 64-bit dma_addr_t. */
	PHYSADDRTOULONG(pa, paddr);
	pci_free_consistent(osh->pdev, size, va, paddr);
#else
	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
#endif /* BCMDMA64OSL */
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	osl_sec_dma_free_consistent(osh, va, size, pa);
#endif /* BCM_SECURE_DMA */
}
983
/* Convert a kernel virtual address to its physical address, returned as
 * an opaque pointer value.
 */
void *
osl_virt_to_phys(void *va)
{
	return (void *)(uintptr)virt_to_phys(va);
}
989
d2839953
RC
990#include <asm/cacheflush.h>
991void BCMFASTPATH
992osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
993{
994 return;
995}
d2839953
RC
996
/*
 * Map a virtual buffer for streaming DMA and return its device address.
 *
 * direction: DMA_TX (to device) or DMA_RX (from device)
 * p, dmah:   unused here; kept for the common hnddma interface
 *
 * On mapping failure the returned dmaaddr_t is zeroed (hi and lo).
 * STB SoC builds on ARMv8 do explicit cache maintenance and return the
 * physical address directly instead of going through the DMA API.
 */
dmaaddr_t BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	int dir;
	dmaaddr_t ret_addr;
	dma_addr_t map_addr;
	int ret;

	DMA_LOCK(osh);

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;

#ifdef STB_SOC_WIFI
#if (__LINUX_ARM_ARCH__ == 8)
	/* need to flush or invalidate the cache here */
	if (dir == DMA_TX) { /* to device */
		osl_cache_flush(va, size);
	} else if (dir == DMA_RX) { /* from device */
		osl_cache_inv(va, size);
	} else { /* both */
		osl_cache_flush(va, size);
		osl_cache_inv(va, size);
	}
	DMA_UNLOCK(osh);
	return virt_to_phys(va);
#else /* (__LINUX_ARM_ARCH__ == 8) */
	map_addr = dma_map_single(osh->pdev, va, size, dir);
	DMA_UNLOCK(osh);
	return map_addr;
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else /* ! STB_SOC_WIFI */
	map_addr = pci_map_single(osh->pdev, va, size, dir);
#endif /* ! STB_SOC_WIFI */

	ret = pci_dma_mapping_error(osh->pdev, map_addr);

	if (ret) {
		printk("%s: Failed to map memory\n", __FUNCTION__);
		PHYSADDRLOSET(ret_addr, 0);
		PHYSADDRHISET(ret_addr, 0);
	} else {
		PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff);
		PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
	}

#ifdef DHD_MAP_LOGGING
	osl_dma_map_logging(osh, osh->dhd_map_log, ret_addr, size);
#endif /* DHD_MAP_LOGGING */

	DMA_UNLOCK(osh);

	return ret_addr;
}
1051
/*
 * Unmap a streaming DMA mapping created by osl_dma_map().  The unmap
 * path must mirror the map path's ifdef selection (pci_unmap_single /
 * dma_unmap_single / explicit cache sync on STB ARMv8).  The event is
 * recorded in the unmap log when DHD_MAP_LOGGING is enabled.
 */
void BCMFASTPATH
osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction)
{
	int dir;
#ifdef BCMDMA64OSL
	dma_addr_t paddr;
#endif /* BCMDMA64OSL */

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	DMA_LOCK(osh);

	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;

#ifdef DHD_MAP_LOGGING
	osl_dma_map_logging(osh, osh->dhd_unmap_log, pa, size);
#endif /* DHD_MAP_LOGGING */

#ifdef BCMDMA64OSL
	/* Recombine the hi/lo dmaaddr_t pair into a 64-bit dma_addr_t. */
	PHYSADDRTOULONG(pa, paddr);
	pci_unmap_single(osh->pdev, paddr, size, dir);
#else /* BCMDMA64OSL */

#ifdef STB_SOC_WIFI
#if (__LINUX_ARM_ARCH__ == 8)
	if (dir == DMA_TX) { /* to device */
		dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
	} else if (dir == DMA_RX) { /* from device */
		dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
	} else { /* both */
		dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
		dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
	}
#else /* (__LINUX_ARM_ARCH__ == 8) */
	dma_unmap_single(osh->pdev, (uintptr)pa, size, dir);
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else /* STB_SOC_WIFI */
	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
#endif /* STB_SOC_WIFI */

#endif /* BCMDMA64OSL */

	DMA_UNLOCK(osh);
}
1096
/* OSL function for CPU relax */
inline void BCMFASTPATH
osl_cpu_relax(void)
{
	cpu_relax();
}
1103
/* Disable kernel preemption on the current CPU (osh unused). */
extern void osl_preempt_disable(osl_t *osh)
{
	preempt_disable();
}
1108
/* Re-enable kernel preemption; pairs with osl_preempt_disable(). */
extern void osl_preempt_enable(osl_t *osh)
{
	preempt_enable();
}
1113
#if defined(BCMASSERT_LOG)
/* Assertion hook: format "<expr>: file <basename>, line <line>" and act on it
 * according to the global g_assert_type policy:
 *   0 - panic, 1/3 - log only, 2 - log then BUG(), anything else - ignore.
 */
void
osl_assert(const char *exp, const char *file, int line)
{
	char tempbuf[256];
	const char *basename;

	/* keep only the file's basename so log lines stay short */
	basename = strrchr(file, '/');
	/* skip the '/' */
	if (basename)
		basename++;

	if (!basename)
		basename = file;

#ifdef BCMASSERT_LOG
	/* Use the full buffer: the previous hard-coded 64-byte limit could
	 * needlessly truncate long expression or file names.
	 */
	snprintf(tempbuf, sizeof(tempbuf), "\"%s\": file \"%s\", line %d\n",
		exp, basename, line);
#endif /* BCMASSERT_LOG */

	switch (g_assert_type) {
	case 0:
		panic("%s", tempbuf);
		break;
	case 1:
		/* fall through */
	case 3:
		printk("%s", tempbuf);
		break;
	case 2:
		printk("%s", tempbuf);
		BUG();
		break;
	default:
		break;
	}
}
#endif // endif
1152
1153void
1154osl_delay(uint usec)
1155{
1156 uint d;
1157
1158 while (usec > 0) {
1159 d = MIN(usec, 1000);
1160 udelay(d);
1161 usec -= d;
1162 }
1163}
1164
1165void
1166osl_sleep(uint ms)
1167{
d2839953
RC
1168 if (ms < 20)
1169 usleep_range(ms*1000, ms*1000 + 1000);
1170 else
965f77c4 1171 msleep(ms);
d2839953
RC
1172}
1173
1174uint64
1175osl_sysuptime_us(void)
1176{
965f77c4 1177 struct osl_timespec tv;
d2839953
RC
1178 uint64 usec;
1179
965f77c4 1180 osl_do_gettimeofday(&tv);
d2839953
RC
1181 /* tv_usec content is fraction of a second */
1182 usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec;
1183 return usec;
1184}
1185
965f77c4
RC
1186uint64
1187osl_localtime_ns(void)
1188{
1189 uint64 ts_nsec = 0;
1190
1191 ts_nsec = local_clock();
1192
1193 return ts_nsec;
1194}
1195
1196void
1197osl_get_localtime(uint64 *sec, uint64 *usec)
1198{
1199 uint64 ts_nsec = 0;
1200 unsigned long rem_nsec = 0;
1201
1202 ts_nsec = local_clock();
1203 rem_nsec = do_div(ts_nsec, NSEC_PER_SEC);
1204 *sec = (uint64)ts_nsec;
1205 *usec = (uint64)(rem_nsec / MSEC_PER_SEC);
1206}
1207
1208uint64
1209osl_systztime_us(void)
1210{
3910ce8e 1211 struct osl_timespec tv;
965f77c4
RC
1212 uint64 tzusec;
1213
3910ce8e 1214 osl_do_gettimeofday(&tv);
965f77c4
RC
1215 /* apply timezone */
1216 tzusec = (uint64)((tv.tv_sec - (sys_tz.tz_minuteswest * 60)) *
1217 USEC_PER_SEC);
1218 tzusec += tv.tv_usec;
1219
1220 return tzusec;
1221}
1222
d2839953
RC
1223/*
1224 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
1225 */
1226
1227/*
1228 * BINOSL selects the slightly slower function-call-based binary compatible osl.
1229 */
1230
1231uint32
1232osl_rand(void)
1233{
1234 uint32 rand;
1235
1236 get_random_bytes(&rand, sizeof(rand));
1237
1238 return rand;
1239}
1240
1241/* Linux Kernel: File Operations: start */
1242void *
1243osl_os_open_image(char *filename)
1244{
1245 struct file *fp;
1246
1247 fp = filp_open(filename, O_RDONLY, 0);
1248 /*
1249 * 2.6.11 (FC4) supports filp_open() but later revs don't?
1250 * Alternative:
1251 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
1252 * ???
1253 */
1254 if (IS_ERR(fp))
1255 fp = NULL;
1256
1257 return fp;
1258}
1259
1260int
1261osl_os_get_image_block(char *buf, int len, void *image)
1262{
1263 struct file *fp = (struct file *)image;
1264 int rdlen;
1265
1266 if (!image)
1267 return 0;
1268
1269#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
1270 rdlen = kernel_read(fp, buf, len, &fp->f_pos);
1271#else
1272 rdlen = kernel_read(fp, fp->f_pos, buf, len);
1273#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
1274
1275 if (rdlen > 0)
1276 fp->f_pos += rdlen;
1277
1278 return rdlen;
1279}
1280
1281void
1282osl_os_close_image(void *image)
1283{
1284 if (image)
1285 filp_close((struct file *)image, NULL);
1286}
1287
/* Return the size in bytes of an open image, leaving the current file
 * position unchanged. Returns 0 for a NULL handle.
 */
int
osl_os_image_size(void *image)
{
	int len = 0;
	int saved_pos;

	if (image) {
		/* remember where we are (whence=1, SEEK_CUR) */
		saved_pos = generic_file_llseek(image, 0, 1);
		/* seek to the end (whence=2, SEEK_END) to learn the length */
		len = generic_file_llseek(image, 0, 2);
		/* restore the original position (whence=0, SEEK_SET) */
		generic_file_llseek(image, saved_pos, 0);
	}
	return len;
}
1303
1304/* Linux Kernel: File Operations: end */
1305
#if (defined(STB) && defined(__arm__))
/* Read a register of 'size' bytes at MMIO address 'addr' into *v.
 * When the bus is PCI and the ACP workaround is enabled, the access is
 * serialized against L2 cache controller register operations by taking
 * l2x0_reg_lock with IRQs disabled.
 */
inline void osl_pcie_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
{
	unsigned long flags = 0;
	int pci_access = 0;
	int acp_war_enab = ACP_WAR_ENAB();

	if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
		pci_access = 1;

	if (pci_access && acp_war_enab)
		spin_lock_irqsave(&l2x0_reg_lock, flags);

	/* dispatch on the access width */
	switch (size) {
	case sizeof(uint8):
		*(volatile uint8*)v = readb((volatile uint8*)(addr));
		break;
	case sizeof(uint16):
		*(volatile uint16*)v = readw((volatile uint16*)(addr));
		break;
	case sizeof(uint32):
		*(volatile uint32*)v = readl((volatile uint32*)(addr));
		break;
	case sizeof(uint64):
		/* no 64-bit MMIO accessor used here; plain volatile load */
		*(volatile uint64*)v = *((volatile uint64*)(addr));
		break;
	}

	if (pci_access && acp_war_enab)
		spin_unlock_irqrestore(&l2x0_reg_lock, flags);
}
#endif // endif
1338
#if defined(BCM_BACKPLANE_TIMEOUT)
/* Backplane-timeout-aware register read: read 'size' bytes at 'addr' into *v
 * and treat an all-ones result as a possible backplane access timeout. When
 * that happens, invoke the registered clear callback (osh->bpt_cb) once,
 * guarded by 'in_si_clear' so the callback's own register reads do not
 * recurse back into it.
 * NOTE(review): 'in_si_clear' is a plain static int, not atomic — looks
 * single-context only; confirm callers never race here.
 */
inline void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
{
	bool poll_timeout = FALSE;
	static int in_si_clear = FALSE;

	switch (size) {
	case sizeof(uint8):
		*(volatile uint8*)v = readb((volatile uint8*)(addr));
		if (*(volatile uint8*)v == 0xff)
			poll_timeout = TRUE;
		break;
	case sizeof(uint16):
		*(volatile uint16*)v = readw((volatile uint16*)(addr));
		if (*(volatile uint16*)v == 0xffff)
			poll_timeout = TRUE;
		break;
	case sizeof(uint32):
		*(volatile uint32*)v = readl((volatile uint32*)(addr));
		if (*(volatile uint32*)v == 0xffffffff)
			poll_timeout = TRUE;
		break;
	case sizeof(uint64):
		/* plain volatile load; no 64-bit MMIO accessor used */
		*(volatile uint64*)v = *((volatile uint64*)(addr));
		if (*(volatile uint64*)v == 0xffffffffffffffff)
			poll_timeout = TRUE;
		break;
	}

	/* all-ones read + registered callback => attempt backplane clear */
	if (osh && osh->sih && (in_si_clear == FALSE) && poll_timeout && osh->bpt_cb) {
		in_si_clear = TRUE;
		osh->bpt_cb((void *)osh->sih, (void *)addr);
		in_si_clear = FALSE;
	}
}
#endif /* BCM_BACKPLANE_TIMEOUT */
1375
1376#ifdef BCM_SECURE_DMA
/* Map a physically contiguous page run of 'size' bytes into kernel VA space
 * via vmap(). 'iscache' selects a cached vs non-cached mapping; 'isdecr'
 * additionally records the VA-to-PA delta in osh->contig_delta_va_pa for
 * later address translation.
 * Returns the mapped VA, or NULL on failure.
 * NOTE(review): when !iscache on a non-ARMv7 build 'addr' stays NULL (no
 * vmap call is compiled in), and the isdecr delta is then computed from
 * NULL; also vmap() itself may return NULL — confirm callers tolerate this.
 */
static void *
osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr)
{

	struct page **map;
	int order, i;
	void *addr = NULL;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/* temporary page-pointer array describing the contiguous run */
	map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);

	if (map == NULL)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		map[i] = page + i;

	if (iscache) {
		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));
		if (isdecr) {
			osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
		}
	} else {

#if defined(__ARM_ARCH_7A__)
		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
			pgprot_noncached(__pgprot(PAGE_KERNEL)));
#endif // endif
		if (isdecr) {
			osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
		}
	}

	/* the page array is only needed for vmap() itself */
	kfree(map);
	return (void *)addr;
}
1415
/* Tear down a kernel VA mapping created by osl_sec_dma_ioremap(). */
static void
osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
{
	vunmap(contig_base_va);
}
1421
1422static int
1423osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
1424{
1425 int i;
1426 int ret = BCME_OK;
1427 sec_mem_elem_t *sec_mem_elem;
1428
1429 if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) {
1430
1431 *list = sec_mem_elem;
1432 bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max));
1433 for (i = 0; i < max-1; i++) {
1434 sec_mem_elem->next = (sec_mem_elem + 1);
1435 sec_mem_elem->size = mbsize;
1436 sec_mem_elem->pa_cma = osh->contig_base_alloc;
1437 sec_mem_elem->vac = osh->contig_base_alloc_va;
1438
1439 sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
1440 osh->contig_base_alloc += mbsize;
1441 osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);
1442
1443 sec_mem_elem = sec_mem_elem + 1;
1444 }
1445 sec_mem_elem->next = NULL;
1446 sec_mem_elem->size = mbsize;
1447 sec_mem_elem->pa_cma = osh->contig_base_alloc;
1448 sec_mem_elem->vac = osh->contig_base_alloc_va;
1449
1450 sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
1451 osh->contig_base_alloc += mbsize;
1452 osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);
1453
1454 } else {
1455 printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);
1456 ret = BCME_ERROR;
1457 }
1458 return ret;
1459}
1460
1461static void
1462osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
1463{
1464 if (sec_list_base)
1465 kfree(sec_list_base);
1466}
1467
/* Pop a free secure-DMA element (currently always from the 4K free list)
 * and append it to the per-device in-use list in ptr_cma_info.
 * Asserts (rather than fails) when the free list is exhausted.
 * The va/size/direction/offset arguments are currently unused here; the
 * caller fills the element in afterwards.
 */
static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
	struct sec_cma_info *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem = NULL;

#ifdef NOT_YET
	/* size-tiered free lists: not enabled yet */
	if (size <= 512 && osh->sec_list_512) {
		sec_mem_elem = osh->sec_list_512;
		osh->sec_list_512 = sec_mem_elem->next;
	}
	else if (size <= 2048 && osh->sec_list_2048) {
		sec_mem_elem = osh->sec_list_2048;
		osh->sec_list_2048 = sec_mem_elem->next;
	}
	else
#else
	ASSERT(osh->sec_list_4096);
	sec_mem_elem = osh->sec_list_4096;
	osh->sec_list_4096 = sec_mem_elem->next;
#endif /* NOT_YET */

	sec_mem_elem->next = NULL;

	/* append to the tail of the in-use list */
	if (ptr_cma_info->sec_alloc_list_tail) {
		ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
	}
	else {
		/* First allocation: If tail is NULL, sec_alloc_list MUST also be NULL */
		ASSERT(ptr_cma_info->sec_alloc_list == NULL);
		ptr_cma_info->sec_alloc_list = sec_mem_elem;
		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
	}
	return sec_mem_elem;
}
1504
/* Return a secure-DMA element to its free list (currently always the 4K
 * list), clearing its DMA handle and VA first.
 */
static void BCMFASTPATH
osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem)
{
	sec_mem_elem->dma_handle = 0x0;
	sec_mem_elem->va = NULL;
#ifdef NOT_YET
	/* size-tiered free lists: not enabled yet */
	if (sec_mem_elem->size == 512) {
		sec_mem_elem->next = osh->sec_list_512;
		osh->sec_list_512 = sec_mem_elem;
	} else if (sec_mem_elem->size == 2048) {
		sec_mem_elem->next = osh->sec_list_2048;
		osh->sec_list_2048 = sec_mem_elem;
	} else if (sec_mem_elem->size == 4096) {
#endif /* NOT_YET */
	/* push onto the 4K free list */
	sec_mem_elem->next = osh->sec_list_4096;
	osh->sec_list_4096 = sec_mem_elem;
#ifdef NOT_YET
	}
	else
		printf("%s free failed size=%d\n", __FUNCTION__, sec_mem_elem->size);
#endif /* NOT_YET */
}
1527
1528static sec_mem_elem_t * BCMFASTPATH
1529osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
1530{
1531 sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
1532 sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;
1533
1534 if (sec_mem_elem->dma_handle == dma_handle) {
1535
1536 ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
1537
1538 if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
1539 ptr_cma_info->sec_alloc_list_tail = NULL;
1540 ASSERT(ptr_cma_info->sec_alloc_list == NULL);
1541 }
1542
1543 return sec_mem_elem;
1544 }
1545 sec_mem_elem = sec_mem_elem->next;
1546
1547 while (sec_mem_elem != NULL) {
1548
1549 if (sec_mem_elem->dma_handle == dma_handle) {
1550
1551 sec_prv_elem->next = sec_mem_elem->next;
1552 if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail)
1553 ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;
1554
1555 return sec_mem_elem;
1556 }
1557 sec_prv_elem = sec_mem_elem;
1558 sec_mem_elem = sec_mem_elem->next;
1559 }
1560 return NULL;
1561}
1562
1563static sec_mem_elem_t *
1564osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
1565{
1566 sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
1567
1568 if (sec_mem_elem) {
1569
1570 ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
1571
1572 if (ptr_cma_info->sec_alloc_list == NULL)
1573 ptr_cma_info->sec_alloc_list_tail = NULL;
1574
1575 return sec_mem_elem;
1576
1577 } else
1578 return NULL;
1579}
1580
/* Return (without removing) the most recently queued in-use element,
 * or NULL if the list is empty.
 */
static void * BCMFASTPATH
osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
	return ptr_cma_info->sec_alloc_list_tail;
}
1586
/* DMA-map the TX metadata that immediately follows a payload previously
 * mapped with osl_sec_dma_map(): the last queued element's recorded VA must
 * equal va + size, i.e. the packet added just before this call.
 * Returns the DMA handle, or 0 if the expected element is not found.
 */
dma_addr_t BCMFASTPATH
osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	uint loffset;
	void *vaorig = ((uint8 *)va + size);
	dma_addr_t dma_handle = 0x0;
	/* packet will be the one added with osl_sec_dma_map() just before this call */

	sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);

	if (sec_mem_elem && sec_mem_elem->va == vaorig) {

		pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
		/* offset of the buffer within its page */
		loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));

		dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
			(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));

	} else {
		printf("%s: error orig va not found va = 0x%p \n",
			__FUNCTION__, vaorig);
	}
	return dma_handle;
}
1614
/* Map a buffer for secure DMA by bouncing it through a CMA element:
 * allocate an element, copy TX payloads into its CMA backing (RX buffers
 * instead get a zeroed length word the device will overwrite), then
 * dma_map_page() the CMA page and record the handle in the element.
 * Returns the DMA handle; dmah (if given) receives a single-segment map.
 */
dma_addr_t BCMFASTPATH
osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
{

	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	void *pa_cma_kmap_va = NULL;
	uint buflen = 0;
	dma_addr_t dma_handle = 0x0;
	uint loffset;
#ifdef NOT_YET
	int *fragva;
	struct sk_buff *skb;
	int i = 0;
#endif /* NOT_YET */

	ASSERT((direction == DMA_RX) || (direction == DMA_TX));
	sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);

	sec_mem_elem->va = va;
	sec_mem_elem->direction = direction;
	pa_cma_page = sec_mem_elem->pa_cma_page;

	/* offset of the CMA buffer within its page */
	loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
	/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
	 * pa_cma_kmap_va += loffset;
	 */

	pa_cma_kmap_va = sec_mem_elem->vac;
	pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset);
	buflen = size;

	if (direction == DMA_TX) {
		/* NOTE(review): 'offset' was already added to pa_cma_kmap_va
		 * just above, so this memcpy lands at 2*offset while the
		 * dma_map_page() below maps at a single offset. Looks wrong
		 * for any caller passing offset != 0 — confirm before changing.
		 */
		memcpy((uint8*)pa_cma_kmap_va+offset, va, size);

#ifdef NOT_YET
		if (p == NULL) {

			memcpy(pa_cma_kmap_va, va, size);
			/* prhex("Txpkt",pa_cma_kmap_va, size); */
		} else {
			for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
				if (skb_is_nonlinear(skb)) {

					for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
						skb_frag_t *f = &skb_shinfo(skb)->frags[i];
						fragva = kmap_atomic(skb_frag_page(f));
						pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
						memcpy((pa_cma_kmap_va),
						(fragva + f->page_offset), skb_frag_size(f));
						kunmap_atomic(fragva);
						buflen += skb_frag_size(f);
					}
				} else {

					pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
					memcpy(pa_cma_kmap_va, skb->data, skb->len);
					buflen += skb->len;
				}
			}

		}
#endif /* NOT_YET */
		if (dmah) {
			dmah->nsegs = 1;
			dmah->origsize = buflen;
		}
	}
	else
	{
		if ((p != NULL) && (dmah != NULL)) {
			dmah->nsegs = 1;
			dmah->origsize = buflen;
		}
		/* RX: zero the length word; the device writes the real length
		 * and osl_sec_dma_unmap() polls for it to go non-zero
		 */
		*(uint32 *)(pa_cma_kmap_va) = 0x0;
	}

	if (direction == DMA_RX) {
		/* make the zeroed length word visible before mapping */
		flush_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));
	}
	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset+offset, buflen,
		(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
	if (dmah) {
		dmah->segs[0].addr = dma_handle;
		dmah->segs[0].length = buflen;
	}
	sec_mem_elem->dma_handle = dma_handle;
	/* kunmap_atomic(pa_cma_kmap_va-loffset); */
	return dma_handle;
}
1706
1707dma_addr_t BCMFASTPATH
1708osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map)
1709{
1710
1711 struct page *pa_cma_page;
1712 phys_addr_t pa_cma;
1713 dma_addr_t dma_handle = 0x0;
1714 uint loffset;
1715
1716 pa_cma = ((uint8 *)va - (uint8 *)osh->contig_delta_va_pa);
1717 pa_cma_page = phys_to_page(pa_cma);
1718 loffset = pa_cma -(pa_cma & ~(PAGE_SIZE-1));
1719
1720 dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
1721 (direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
1722
1723 return dma_handle;
1724}
1725
/* Unmap a secure-DMA bounce mapping created by osl_sec_dma_map() and return
 * the element to the free list. For pktless RX (p == NULL) the CMA buffer's
 * leading length word is polled (up to ~200 us) until the device has written
 * a non-zero length, then the payload is copied back to the caller's VA.
 * TX buffers are simply unmapped.
 */
void BCMFASTPATH
osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem;
#ifdef NOT_YET
	struct page *pa_cma_page;
#endif // endif
	void *pa_cma_kmap_va = NULL;
	uint buflen = 0;
	dma_addr_t pa_cma;
	void *va;
	int read_count = 0;
	BCM_REFERENCE(buflen);
	BCM_REFERENCE(read_count);

	/* locate and unlink the element owning this DMA handle */
	sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
	ASSERT(sec_mem_elem);

	va = sec_mem_elem->va;
	va = (uint8 *)va - offset;
	pa_cma = sec_mem_elem->pa_cma;

#ifdef NOT_YET
	pa_cma_page = sec_mem_elem->pa_cma_page;
#endif // endif

	if (direction == DMA_RX) {

		if (p == NULL) {

			/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
			 * pa_cma_kmap_va += loffset;
			 */

			pa_cma_kmap_va = sec_mem_elem->vac;

			/* poll the leading length word (written by the device)
			 * with cache invalidation between reads
			 */
			do {
				invalidate_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));

				buflen = *(uint *)(pa_cma_kmap_va);
				if (buflen)
					break;

				OSL_DELAY(1);
				read_count++;
			} while (read_count < 200);
			dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE);
			/* copy the received data back to the caller's buffer */
			memcpy(va, pa_cma_kmap_va, size);
			/* kunmap_atomic(pa_cma_kmap_va); */
		}
#ifdef NOT_YET
		else {
			buflen = 0;
			for (skb = (struct sk_buff *)p; (buflen < size) &&
				(skb != NULL); skb = skb->next) {
				if (skb_is_nonlinear(skb)) {
					pa_cma_kmap_va = kmap_atomic(pa_cma_page);
					for (i = 0; (buflen < size) &&
						(i < skb_shinfo(skb)->nr_frags); i++) {
						skb_frag_t *f = &skb_shinfo(skb)->frags[i];
						cpuaddr = kmap_atomic(skb_frag_page(f));
						pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
						memcpy((cpuaddr + f->page_offset),
							pa_cma_kmap_va, skb_frag_size(f));
						kunmap_atomic(cpuaddr);
						buflen += skb_frag_size(f);
					}
					kunmap_atomic(pa_cma_kmap_va);
				} else {
					pa_cma_kmap_va = kmap_atomic(pa_cma_page);
					pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
					memcpy(skb->data, pa_cma_kmap_va, skb->len);
					kunmap_atomic(pa_cma_kmap_va);
					buflen += skb->len;
				}

			}

		}
#endif /* NOT YET */
	} else {
		dma_unmap_page(OSH_NULL, pa_cma, size+offset, DMA_TO_DEVICE);
	}

	osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
}
1813
1814void
1815osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
1816{
1817
1818 sec_mem_elem_t *sec_mem_elem;
1819
1820 sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
1821
1822 while (sec_mem_elem != NULL) {
1823
1824 dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size,
1825 sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1826 osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
1827
1828 sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
1829 }
1830}
1831
1832static void
1833osl_sec_dma_init_consistent(osl_t *osh)
1834{
1835 int i;
1836 void *temp_va = osh->contig_base_alloc_coherent_va;
1837 phys_addr_t temp_pa = osh->contig_base_alloc_coherent;
1838
1839 for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
1840 osh->sec_cma_coherent[i].avail = TRUE;
1841 osh->sec_cma_coherent[i].va = temp_va;
1842 osh->sec_cma_coherent[i].pa = temp_pa;
1843 temp_va = ((uint8 *)temp_va)+SEC_CMA_COHERENT_BLK;
1844 temp_pa += SEC_CMA_COHERENT_BLK;
1845 }
1846}
1847
1848static void *
1849osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap)
1850{
1851
1852 void *temp_va = NULL;
1853 ulong temp_pa = 0;
1854 int i;
1855
1856 if (size > SEC_CMA_COHERENT_BLK) {
1857 printf("%s unsupported size\n", __FUNCTION__);
1858 return NULL;
1859 }
1860
1861 for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
1862 if (osh->sec_cma_coherent[i].avail == TRUE) {
1863 temp_va = osh->sec_cma_coherent[i].va;
1864 temp_pa = osh->sec_cma_coherent[i].pa;
1865 osh->sec_cma_coherent[i].avail = FALSE;
1866 break;
1867 }
1868 }
1869
1870 if (i == SEC_CMA_COHERENT_MAX)
1871 printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
1872 temp_va, (ulong)temp_pa, size);
1873
1874 *pap = (unsigned long)temp_pa;
1875 return temp_va;
1876}
1877
1878static void
1879osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
1880{
1881 int i = 0;
1882
1883 for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
1884 if (osh->sec_cma_coherent[i].va == va) {
1885 osh->sec_cma_coherent[i].avail = TRUE;
1886 break;
1887 }
1888 }
1889 if (i == SEC_CMA_COHERENT_MAX)
1890 printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
1891 va, (ulong)pa, size);
1892}
1893#endif /* BCM_SECURE_DMA */
1894
1895/* timer apis */
1896/* Note: All timer api's are thread unsafe and should be protected with locks by caller */
1897
965f77c4
RC
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
/* Trampoline for the 4.15+ timer callback signature: recover the compat
 * wrapper from the embedded struct timer_list and invoke the legacy
 * (unsigned long argument) callback it carries.
 */
void
timer_cb_compat(struct timer_list *tl)
{
	timer_list_compat_t *t = container_of(tl, timer_list_compat_t, timer);
	t->callback((ulong)t->arg);
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */
1906
d2839953
RC
1907osl_timer_t *
1908osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
1909{
1910 osl_timer_t *t;
1911 BCM_REFERENCE(fn);
1912 if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
1913 printk(KERN_ERR "osl_timer_init: out of memory, malloced %d bytes\n",
1914 (int)sizeof(osl_timer_t));
1915 return (NULL);
1916 }
1917 bzero(t, sizeof(osl_timer_t));
1918 if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) {
1919 printf("osl_timer_init: malloc failed\n");
1920 MFREE(NULL, t, sizeof(osl_timer_t));
1921 return (NULL);
1922 }
d2839953
RC
1923 t->set = TRUE;
1924
965f77c4 1925 init_timer_compat(t->timer, (linux_timer_fn)fn, arg);
d2839953
RC
1926
1927 return (t);
1928}
1929
1930void
1931osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
1932{
1933 if (t == NULL) {
1934 printf("%s: Timer handle is NULL\n", __FUNCTION__);
1935 return;
1936 }
1937 ASSERT(!t->set);
1938
1939 t->set = TRUE;
1940 if (periodic) {
1941 printf("Periodic timers are not supported by Linux timer apis\n");
1942 }
965f77c4 1943 timer_expires(t->timer) = jiffies + ms*HZ/1000;
d2839953
RC
1944
1945 add_timer(t->timer);
1946
1947 return;
1948}
1949
1950void
1951osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
1952{
1953 if (t == NULL) {
1954 printf("%s: Timer handle is NULL\n", __FUNCTION__);
1955 return;
1956 }
1957 if (periodic) {
1958 printf("Periodic timers are not supported by Linux timer apis\n");
1959 }
1960 t->set = TRUE;
965f77c4 1961 timer_expires(t->timer) = jiffies + ms*HZ/1000;
d2839953 1962
965f77c4 1963 mod_timer(t->timer, timer_expires(t->timer));
d2839953
RC
1964
1965 return;
1966}
1967
1968/*
1969 * Return TRUE if timer successfully deleted, FALSE if still pending
1970 */
1971bool
1972osl_timer_del(osl_t *osh, osl_timer_t *t)
1973{
1974 if (t == NULL) {
1975 printf("%s: Timer handle is NULL\n", __FUNCTION__);
1976 return (FALSE);
1977 }
1978 if (t->set) {
1979 t->set = FALSE;
1980 if (t->timer) {
1981 del_timer(t->timer);
1982 MFREE(NULL, t->timer, sizeof(struct timer_list));
1983 }
1984 MFREE(NULL, t, sizeof(osl_timer_t));
1985 }
1986 return (TRUE);
1987}
965f77c4
RC
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/* Compatibility shim exposing the pre-4.14 kernel_read() calling convention
 * (explicit offset, returns int). The offset is taken by value, so the
 * caller's file position is never modified — kernel_read() advances only
 * the local copy.
 */
int
kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count)
{
	return (int)kernel_read(file, addr, (size_t)count, &offset);
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
1995
1996void *
1997osl_spin_lock_init(osl_t *osh)
1998{
1999 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
2000 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
2001 /* and this results in kernel asserts in internal builds */
2002 spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
2003 if (lock)
2004 spin_lock_init(lock);
2005 return ((void *)lock);
2006}
2007
2008void
2009osl_spin_lock_deinit(osl_t *osh, void *lock)
2010{
2011 if (lock)
2012 MFREE(osh, lock, sizeof(spinlock_t) + 4);
2013}
2014
2015unsigned long
2016osl_spin_lock(void *lock)
2017{
2018 unsigned long flags = 0;
2019
2020 if (lock)
2021 spin_lock_irqsave((spinlock_t *)lock, flags);
2022
2023 return flags;
2024}
2025
2026void
2027osl_spin_unlock(void *lock, unsigned long flags)
2028{
2029 if (lock)
2030 spin_unlock_irqrestore((spinlock_t *)lock, flags);
2031}
47fa5ad5
RC
2032
#ifdef USE_DMA_LOCK
/* Serialize the DMA map/unmap paths. In hard-IRQ context (or with IRQs
 * already disabled) take the plain spinlock; otherwise take the _bh variant
 * and record that choice in osh->dma_lock_bh so osl_dma_unlock() releases
 * with the matching call.
 */
static void
osl_dma_lock(osl_t *osh)
{
	if (likely(in_irq() || irqs_disabled())) {
		spin_lock(&osh->dma_lock);
	} else {
		spin_lock_bh(&osh->dma_lock);
		osh->dma_lock_bh = TRUE;
	}
}
2044
/* Release the DMA lock using the variant matching how osl_dma_lock() took
 * it: dma_lock_bh set means the _bh variant was used.
 */
static void
osl_dma_unlock(osl_t *osh)
{
	if (unlikely(osh->dma_lock_bh)) {
		/* clear the flag before releasing so it is consistent once
		 * the lock is free
		 */
		osh->dma_lock_bh = FALSE;
		spin_unlock_bh(&osh->dma_lock);
	} else {
		spin_unlock(&osh->dma_lock);
	}
}
2055
/* Initialize the DMA lock and clear the bottom-half bookkeeping flag. */
static void
osl_dma_lock_init(osl_t *osh)
{
	spin_lock_init(&osh->dma_lock);
	osh->dma_lock_bh = FALSE;
}
965f77c4
RC
2062#endif /* USE_DMA_LOCK */
2063
2064void
2065osl_do_gettimeofday(struct osl_timespec *ts)
2066{
3910ce8e
LJ
2067#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
2068 struct timespec64 curtime;
2069#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
965f77c4
RC
2070 struct timespec curtime;
2071#else
2072 struct timeval curtime;
2073#endif
2074
3910ce8e
LJ
2075#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
2076 ktime_get_real_ts64(&curtime);
2077 ts->tv_nsec = curtime.tv_nsec;
2078 ts->tv_usec = curtime.tv_nsec / 1000;
2079#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
965f77c4
RC
2080 getnstimeofday(&curtime);
2081 ts->tv_nsec = curtime.tv_nsec;
2082 ts->tv_usec = curtime.tv_nsec / 1000;
2083#else
2084 do_gettimeofday(&curtime);
2085 ts->tv_usec = curtime.tv_usec;
2086 ts->tv_nsec = curtime.tv_usec * 1000;
2087#endif
2088 ts->tv_sec = curtime.tv_sec;
2089}
2090
2091void
2092osl_get_monotonic_boottime(struct osl_timespec *ts)
2093{
3910ce8e
LJ
2094#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
2095 struct timespec64 curtime;
2096#else
965f77c4 2097 struct timespec curtime;
3910ce8e 2098#endif
965f77c4 2099
3910ce8e
LJ
2100#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
2101 curtime = ktime_to_timespec64(ktime_get_boottime());
2102#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
965f77c4
RC
2103 curtime = ktime_to_timespec(ktime_get_boottime());
2104#else
2105 get_monotonic_boottime(&curtime);
2106#endif
2107 ts->tv_sec = curtime.tv_sec;
2108 ts->tv_nsec = curtime.tv_nsec;
2109 ts->tv_usec = curtime.tv_nsec / 1000;
2110}