/*
 * Linux OS Independent Layer
 *
 * Copyright (C) 1999-2018, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: linux_osl.c 767848 2018-06-15 09:33:44Z $
 */
#include <bcmendian.h>

#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
#include <asm/cacheflush.h>
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */

#include <linux/random.h>

#include <linux/delay.h>
#include <linux/vmalloc.h>

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/moduleparam.h>
#include <linux/skbuff.h>

#ifdef BCM_SECURE_DMA
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#endif /* BCM_SECURE_DMA */
#include <linux/spinlock.h>
extern spinlock_t l2x0_reg_lock;

#ifdef BCM_OBJECT_TRACE
#endif /* BCM_OBJECT_TRACE */
#include "linux_osl_priv.h"

#define PCI_CFG_RETRY		10

#define DUMPBUFSZ 1024
#ifdef BCM_SECURE_DMA
static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
	bool iscache, bool isdecr);
static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
	sec_mem_elem_t **list);
static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
	void *sec_list_base);
static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
	int direction, struct sec_cma_info *ptr_cma_info, uint offset);
static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
static void osl_sec_dma_init_consistent(osl_t *osh);
static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
	ulong *pap);
static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
#endif /* BCM_SECURE_DMA */
/* PCMCIA attribute space access macros */

uint32 g_assert_type = 0; /* By default: kernel panic */

module_param(g_assert_type, int, 0);
#ifdef BCM_SECURE_DMA
#define	SECDMA_MODULE_PARAMS	0
#define	SECDMA_EXT_FILE		1
unsigned long secdma_addr = 0;
unsigned long secdma_addr2 = 0;
u32 secdma_size = 0;
u32 secdma_size2 = 0;
module_param(secdma_addr, ulong, 0);
module_param(secdma_size, int, 0);
module_param(secdma_addr2, ulong, 0);
module_param(secdma_size2, int, 0);
static int secdma_found = 0;
#endif /* BCM_SECURE_DMA */
#ifdef USE_DMA_LOCK
static void osl_dma_lock(osl_t *osh);
static void osl_dma_unlock(osl_t *osh);
static void osl_dma_lock_init(osl_t *osh);

#define DMA_LOCK(osh)		osl_dma_lock(osh)
#define DMA_UNLOCK(osh)		osl_dma_unlock(osh)
#define DMA_LOCK_INIT(osh)	osl_dma_lock_init(osh);
#else
#define DMA_LOCK(osh)		do { /* noop */ } while(0)
#define DMA_UNLOCK(osh)		do { /* noop */ } while(0)
#define DMA_LOCK_INIT(osh)	do { /* noop */ } while(0)
#endif /* USE_DMA_LOCK */
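
/*
 * Note: with USE_DMA_LOCK undefined, the DMA_LOCK/DMA_UNLOCK/DMA_LOCK_INIT
 * macros above compile to no-ops, so the DMA map/unmap paths run without any
 * serialization from this layer.
 */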
static int16 linuxbcmerrormap[] =
{	0,			/* 0 */
	-EINVAL,		/* BCME_ERROR */
	-EINVAL,		/* BCME_BADARG */
	-EINVAL,		/* BCME_BADOPTION */
	-EINVAL,		/* BCME_NOTUP */
	-EINVAL,		/* BCME_NOTDOWN */
	-EINVAL,		/* BCME_NOTAP */
	-EINVAL,		/* BCME_NOTSTA */
	-EINVAL,		/* BCME_BADKEYIDX */
	-EINVAL,		/* BCME_RADIOOFF */
	-EINVAL,		/* BCME_NOTBANDLOCKED */
	-EINVAL,		/* BCME_NOCLK */
	-EINVAL,		/* BCME_BADRATESET */
	-EINVAL,		/* BCME_BADBAND */
	-E2BIG,			/* BCME_BUFTOOSHORT */
	-E2BIG,			/* BCME_BUFTOOLONG */
	-EBUSY,			/* BCME_BUSY */
	-EINVAL,		/* BCME_NOTASSOCIATED */
	-EINVAL,		/* BCME_BADSSIDLEN */
	-EINVAL,		/* BCME_OUTOFRANGECHAN */
	-EINVAL,		/* BCME_BADCHAN */
	-EFAULT,		/* BCME_BADADDR */
	-ENOMEM,		/* BCME_NORESOURCE */
	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
	-EMSGSIZE,		/* BCME_BADLENGTH */
	-EINVAL,		/* BCME_NOTREADY */
	-EPERM,			/* BCME_EPERM */
	-ENOMEM,		/* BCME_NOMEM */
	-EINVAL,		/* BCME_ASSOCIATED */
	-ERANGE,		/* BCME_RANGE */
	-EINVAL,		/* BCME_NOTFOUND */
	-EINVAL,		/* BCME_WME_NOT_ENABLED */
	-EINVAL,		/* BCME_TSPEC_NOTFOUND */
	-EINVAL,		/* BCME_ACM_NOTSUPPORTED */
	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
	-EIO,			/* BCME_SDIO_ERROR */
	-ENODEV,		/* BCME_DONGLE_DOWN */
	-EINVAL,		/* BCME_VERSION */
	-EIO,			/* BCME_TXFAIL */
	-EIO,			/* BCME_RXFAIL */
	-ENODEV,		/* BCME_NODEVICE */
	-EINVAL,		/* BCME_NMODE_DISABLED */
	-ENODATA,		/* BCME_NONRESIDENT */
	-EINVAL,		/* BCME_SCANREJECT */
	-EINVAL,		/* BCME_USAGE_ERROR */
	-EIO,			/* BCME_IOCTL_ERROR */
	-EIO,			/* BCME_SERIAL_PORT_ERR */
	-EOPNOTSUPP,		/* BCME_DISABLED, BCME_NOTENABLED */
	-EIO,			/* BCME_DECERR */
	-EIO,			/* BCME_ENCERR */
	-EIO,			/* BCME_MICERR */
	-ERANGE,		/* BCME_REPLAY */
	-EINVAL,		/* BCME_IE_NOTFOUND */
	-EINVAL,		/* BCME_DATA_NOTFOUND */
	-EINVAL,		/* BCME_NOT_GC */
	-EINVAL,		/* BCME_PRS_REQ_FAILED */
	-EINVAL,		/* BCME_NO_P2P_SE */
	-EINVAL,		/* BCME_NOA_PND */
	-EINVAL,		/* BCME_FRAG_Q_FAILED */
	-EINVAL,		/* BCME_GET_AF_FAILED */
	-EINVAL,		/* BCME_MSCH_NOTREADY */
	-EINVAL,		/* BCME_IOV_LAST_CMD */
	-EINVAL,		/* BCME_MINIPMU_CAL_FAIL */
	-EINVAL,		/* BCME_RCAL_FAIL */
	-EINVAL,		/* BCME_LPF_RCCAL_FAIL */
	-EINVAL,		/* BCME_DACBUF_RCCAL_FAIL */
	-EINVAL,		/* BCME_VCOCAL_FAIL */
	-EINVAL,		/* BCME_BANDLOCKED */
	-EINVAL,		/* BCME_DNGL_DEVRESET */

	/* When a new error code is added to bcmutils.h, add the OS-specific
	 * error translation here as well.
	 */
};

/* check if BCME_LAST changed since the last time this function was updated */
#error "You need to add an OS error translation in the linuxbcmerrormap \
	for new error code defined in bcmutils.h"
#ifdef DHD_MAP_LOGGING
#define DHD_MAP_LOG_SIZE 2048

typedef struct dhd_map_record {
	dma_addr_t addr;	/* members inferred from the map/unmap logging code below */
	uint64 time;
} dhd_map_log_t;

dhd_map_log_t *dhd_map_log = NULL, *dhd_unmap_log = NULL;
uint32 map_idx = 0, unmap_idx = 0;

void
osl_dma_map_dump(void)
{
	printk("%s: map_idx=%d unmap_idx=%d current time=%llu\n",
		__FUNCTION__, map_idx, unmap_idx, OSL_SYSUPTIME_US());
	if (dhd_map_log && dhd_unmap_log) {
		printk("%s: dhd_map_log(pa)=%llx size=%d, dma_unmap_log(pa)=%llx size=%d\n",
			__FUNCTION__, (uint64)__virt_to_phys((ulong)dhd_map_log),
			(uint32)(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE),
			(uint64)__virt_to_phys((ulong)dhd_unmap_log),
			(uint32)(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE));
	}
}
#endif /* DHD_MAP_LOGGING */
/* translate bcmerrors into linux errors */
osl_error(int bcmerror)
{
	if (bcmerror > 0)
		bcmerror = 0;
	else if (bcmerror < BCME_LAST)
		bcmerror = BCME_ERROR;

	/* Array bounds covered by ASSERT in osl_attach */
	return linuxbcmerrormap[-bcmerror];
}
osl_attach(void *pdev, uint bustype, bool pkttag)
{
	void **osl_cmn = NULL;
#ifdef BCM_SECURE_DMA

	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
	if (!(osh = kmalloc(sizeof(osl_t), flags)))

	bzero(osh, sizeof(osl_t));

	if (osl_cmn == NULL || *osl_cmn == NULL) {
		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {

		bzero(osh->cmn, sizeof(osl_cmn_t));

		atomic_set(&osh->cmn->malloced, 0);
		osh->cmn->dbgmem_list = NULL;
		spin_lock_init(&(osh->cmn->dbgmem_lock));

		spin_lock_init(&(osh->cmn->pktalloc_lock));

	atomic_add(1, &osh->cmn->refcount);

	bcm_object_trace_init();

	/* Check that error map has the right number of entries in it */
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

	osh->pub.pkttag = pkttag;
	osh->bustype = bustype;
	osh->magic = OS_HANDLE_MAGIC;

#ifdef BCM_SECURE_DMA
	if ((secdma_addr != 0) && (secdma_size != 0)) {
		printk("linux_osl.c: Buffer info passed via module params, using it.\n");
		if (secdma_found == 0) {
			osh->contig_base_alloc = (phys_addr_t)secdma_addr;
			secdma_memsize = secdma_size;
		} else if (secdma_found == 1) {
			osh->contig_base_alloc = (phys_addr_t)secdma_addr2;
			secdma_memsize = secdma_size2;

		printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);

		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
			(unsigned int)osh->contig_base_alloc);
		osh->stb_ext_params = SECDMA_MODULE_PARAMS;

	else if (stbpriv_init(osh) == 0) {
		printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");
		if (secdma_found == 0) {
			osh->contig_base_alloc =
				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0);
			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0);
		} else if (secdma_found == 1) {
			osh->contig_base_alloc =
				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0);
			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0);

		printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);

		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
			(unsigned int)osh->contig_base_alloc);
		osh->stb_ext_params = SECDMA_EXT_FILE;

		printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n");

	osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc),
		CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);

	if (osh->contig_base_alloc_coherent_va == NULL) {

	osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va;
	osh->contig_base_alloc_coherent = osh->contig_base_alloc;
	osl_sec_dma_init_consistent(osh);

	osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;

	osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
		phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
	if (osh->contig_base_alloc_va == NULL) {
		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);

	osh->contig_base_va = osh->contig_base_alloc_va;

	/*
	 * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, &osh->sec_list_512);
	 * osh->sec_list_base_512 = osh->sec_list_512;
	 * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, &osh->sec_list_2048);
	 * osh->sec_list_base_2048 = osh->sec_list_2048;
	 */
	if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh,
		CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
		osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);

	osh->sec_list_base_4096 = osh->sec_list_4096;

#endif /* BCM_SECURE_DMA */

	osh->pub.mmbus = TRUE;

	osh->pub.mmbus = FALSE;

#ifdef DHD_MAP_LOGGING
	dhd_map_log = kmalloc(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE, flags);

	memset(dhd_map_log, 0, sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE);

	dhd_unmap_log = kmalloc(sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE, flags);

	memset(dhd_unmap_log, 0, sizeof(dhd_map_log_t) * DHD_MAP_LOG_SIZE);
#endif /* DHD_MAP_LOGGING */
void osl_set_bus_handle(osl_t *osh, void *bus_handle)
{
	osh->bus_handle = bus_handle;
}

void* osl_get_bus_handle(osl_t *osh)
{
	return osh->bus_handle;
}

#if defined(BCM_BACKPLANE_TIMEOUT)
void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx)
{
	osh->bpt_cb = (bpt_cb_fn)bpt_cb;
}
#endif /* BCM_BACKPLANE_TIMEOUT */
osl_detach(osl_t *osh)
{
#ifdef BCM_SECURE_DMA
	if (osh->stb_ext_params == SECDMA_EXT_FILE)

	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512);
	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, osh->sec_list_base_2048);

	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
	osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
	osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
#endif /* BCM_SECURE_DMA */

	bcm_object_trace_deinit();

#ifdef DHD_MAP_LOGGING
	kfree(dhd_unmap_log);
#endif /* DHD_MAP_LOGGING */

	ASSERT(osh->magic == OS_HANDLE_MAGIC);
	atomic_sub(1, &osh->cmn->refcount);
	if (atomic_read(&osh->cmn->refcount) == 0) {
/* APIs to set/get specific quirks in OSL layer */
osl_flag_set(osl_t *osh, uint32 mask)

osl_flag_clr(osl_t *osh, uint32 mask)

inline bool BCMFASTPATH
osl_is_flag_set(osl_t *osh, uint32 mask)
{
	return (osh->flags & mask);
}
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
	defined(STB_SOC_WIFI)

inline int BCMFASTPATH
osl_arch_is_coherent(void)

inline int BCMFASTPATH
osl_acp_war_enab(void)

inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
{
#ifdef STB_SOC_WIFI
	dma_sync_single_for_device(OSH_NULL, virt_to_phys(va), size, DMA_TX);
#else /* STB_SOC_WIFI */
	dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size,
		DMA_TO_DEVICE);
#endif /* STB_SOC_WIFI */
}

inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{
#ifdef STB_SOC_WIFI
	dma_sync_single_for_cpu(OSH_NULL, virt_to_phys(va), size, DMA_RX);
#else /* STB_SOC_WIFI */
	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);
#endif /* STB_SOC_WIFI */
}

inline void BCMFASTPATH
osl_prefetch(const void *ptr)
{
#if !defined(STB_SOC_WIFI)
	__asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
osl_pci_read_config(osl_t *osh, uint offset, uint size)
{
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* only 4-byte access supported */

	pci_read_config_dword(osh->pdev, offset, &val);
	if (val != 0xffffffff)

osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
{
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* only 4-byte access supported */

	pci_write_config_dword(osh->pdev, offset, val);
	if (offset != PCI_BAR0_WIN)

	if (osl_pci_read_config(osh, offset, size) == val)
/* return bus # for the pci device pointed by osh->pdev */
osl_pci_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
	return ((struct pci_dev *)osh->pdev)->bus->number;
#endif
}

/* return slot # for the pci device pointed by osh->pdev */
osl_pci_slot(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
#endif
}

/* return domain # for the pci device pointed by osh->pdev */
osl_pcie_domain(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
}

/* return bus # for the pci device pointed by osh->pdev */
osl_pcie_bus(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);

	return ((struct pci_dev *)osh->pdev)->bus->number;
}

/* return the pci device pointed by osh->pdev */
osl_pci_device(osl_t *osh)
{
	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)

osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
}

osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
}
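
/*
 * When CONFIG_DHD_USE_STATIC_BUF is enabled, osl_malloc() below first tries to
 * satisfy requests between PAGE_SIZE and STATIC_BUF_SIZE from the preallocated
 * bcm_static_buf pool, tracking slot usage in buf_use[] under static_lock, and
 * only falls back to kmalloc() when no static slot is free.
 */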
osl_malloc(osl_t *osh, uint size)
{
	/* only ASSERT if osh is defined */

	ASSERT(osh->magic == OS_HANDLE_MAGIC);
#ifdef CONFIG_DHD_USE_STATIC_BUF

	unsigned long irq_flags;

	if ((size >= PAGE_SIZE) && (size <= STATIC_BUF_SIZE))

	spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags);

	for (i = 0; i < STATIC_BUF_MAX_NUM; i++)

	if (bcm_static_buf->buf_use[i] == 0)

	if (i == STATIC_BUF_MAX_NUM)

	spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
	printk("all static buff in use!\n");

	bcm_static_buf->buf_use[i] = 1;
	spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);

	bzero(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE*i, size);

	atomic_add(size, &osh->cmn->malloced);

	return ((void *)(bcm_static_buf->buf_ptr + STATIC_BUF_SIZE*i));

#endif /* CONFIG_DHD_USE_STATIC_BUF */

	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
	if ((addr = kmalloc(size, flags)) == NULL) {

	atomic_add(size, &osh->cmn->malloced);

osl_mallocz(osl_t *osh, uint size)
{
	ptr = osl_malloc(osh, size);

osl_mfree(osl_t *osh, void *addr, uint size)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF

	if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
		<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))

	buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;

	spin_lock_irqsave(&bcm_static_buf->static_lock, flags);
	bcm_static_buf->buf_use[buf_idx] = 0;
	spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags);

	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
		atomic_sub(size, &osh->cmn->malloced);

#endif /* CONFIG_DHD_USE_STATIC_BUF */
	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

		ASSERT(size <= osl_malloced(osh));

		atomic_sub(size, &osh->cmn->malloced);
osl_vmalloc(osl_t *osh, uint size)
{
	/* only ASSERT if osh is defined */

	ASSERT(osh->magic == OS_HANDLE_MAGIC);
	if ((addr = vmalloc(size)) == NULL) {

	atomic_add(size, &osh->cmn->malloced);

osl_vmallocz(osl_t *osh, uint size)
{
	ptr = osl_vmalloc(osh, size);

osl_vmfree(osl_t *osh, void *addr, uint size)
{
	if (osh && osh->cmn) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

		ASSERT(size <= osl_malloced(osh));

		atomic_sub(size, &osh->cmn->malloced);
osl_check_memleak(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	if (atomic_read(&osh->cmn->refcount) == 1)
		return (atomic_read(&osh->cmn->malloced));

osl_malloced(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (atomic_read(&osh->cmn->malloced));
}

osl_malloc_failed(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (osh->failed);
}
osl_dma_consistent_align(void)

osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
{
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))

#ifndef BCM_SECURE_DMA
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
	defined(STB_SOC_WIFI)
	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);

	*pap = (ulong)__virt_to_phys((ulong)va);
#else
	struct pci_dev *hwdev = osh->pdev;

#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL

	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
	va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
#ifdef BCMDMA64OSL
	PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
	PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
#else
	*pap = (dmaaddr_t)pap_lin;
#endif /* BCMDMA64OSL */

#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
#endif /* BCM_SECURE_DMA */
osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
#endif /* BCMDMA64OSL */
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

#ifndef BCM_SECURE_DMA
#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
	defined(STB_SOC_WIFI)

#else
#ifdef BCMDMA64OSL
	PHYSADDRTOULONG(pa, paddr);
	pci_free_consistent(osh->pdev, size, va, paddr);
#else
	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
#endif /* BCMDMA64OSL */
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
	osl_sec_dma_free_consistent(osh, va, size, pa);
#endif /* BCM_SECURE_DMA */
osl_virt_to_phys(void *va)
{
	return (void *)(uintptr)virt_to_phys(va);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
#include <asm/cacheflush.h>
osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
}
#endif /* LINUX_VERSION_CODE >= 2.6.36 */
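
/*
 * On STB_SOC_WIFI builds with __LINUX_ARM_ARCH__ == 8, osl_dma_map() below does
 * explicit cache maintenance (osl_cache_flush()/osl_cache_inv()) and returns
 * virt_to_phys(va) directly; other configurations go through
 * dma_map_single()/pci_map_single().
 */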
dmaaddr_t BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

#ifdef STB_SOC_WIFI
#if (__LINUX_ARM_ARCH__ == 8)
	/* need to flush or invalidate the cache here */
	if (dir == DMA_TX) { /* to device */
		osl_cache_flush(va, size);
	} else if (dir == DMA_RX) { /* from device */
		osl_cache_inv(va, size);
	} else {
		osl_cache_flush(va, size);
		osl_cache_inv(va, size);
	}

	return virt_to_phys(va);
#else /* (__LINUX_ARM_ARCH__ == 8) */
	map_addr = dma_map_single(osh->pdev, va, size, dir);
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else /* ! STB_SOC_WIFI */
	map_addr = pci_map_single(osh->pdev, va, size, dir);
#endif /* ! STB_SOC_WIFI */

#ifdef DHD_MAP_LOGGING
	dhd_map_log[map_idx].addr = map_addr;
	dhd_map_log[map_idx].time = OSL_SYSUPTIME_US();

	map_idx = map_idx % DHD_MAP_LOG_SIZE;
#endif /* DHD_MAP_LOGGING */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	ret = pci_dma_mapping_error(osh->pdev, map_addr);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
	ret = pci_dma_mapping_error(map_addr);

	printk("%s: Failed to map memory\n", __FUNCTION__);
	PHYSADDRLOSET(ret_addr, 0);
	PHYSADDRHISET(ret_addr, 0);

	PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff);
	PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction)
{
#endif /* BCMDMA64OSL */

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

	PHYSADDRTOULONG(pa, paddr);
#ifdef DHD_MAP_LOGGING
	if (dhd_unmap_log) {
		dhd_unmap_log[unmap_idx].addr = paddr;
		dhd_unmap_log[unmap_idx].time = OSL_SYSUPTIME_US();

		unmap_idx = unmap_idx % DHD_MAP_LOG_SIZE;
#endif /* DHD_MAP_LOGGING */

	pci_unmap_single(osh->pdev, paddr, size, dir);
#else /* BCMDMA64OSL */

#ifdef STB_SOC_WIFI
#if (__LINUX_ARM_ARCH__ == 8)
	if (dir == DMA_TX) { /* to device */
		dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
	} else if (dir == DMA_RX) { /* from device */
		dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
	} else {
		dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
		dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
	}
#else /* (__LINUX_ARM_ARCH__ == 8) */
	dma_unmap_single(osh->pdev, (uintptr)pa, size, dir);
#endif /* (__LINUX_ARM_ARCH__ == 8) */
#else /* STB_SOC_WIFI */
#ifdef DHD_MAP_LOGGING
	if (dhd_unmap_log) {
		dhd_unmap_log[unmap_idx].addr = pa;
		dhd_unmap_log[unmap_idx].time = OSL_SYSUPTIME_US();

		unmap_idx = unmap_idx % DHD_MAP_LOG_SIZE;
#endif /* DHD_MAP_LOGGING */

	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
#endif /* STB_SOC_WIFI */

#endif /* BCMDMA64OSL */
/* OSL function for CPU relax */
inline void BCMFASTPATH

extern void osl_preempt_disable(osl_t *osh)

extern void osl_preempt_enable(osl_t *osh)
#if defined(BCMASSERT_LOG)
osl_assert(const char *exp, const char *file, int line)
{
	const char *basename;

	basename = strrchr(file, '/');

#ifdef BCMASSERT_LOG
	snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
		exp, basename, line);
#endif /* BCMASSERT_LOG */

	switch (g_assert_type) {

		panic("%s", tempbuf);

		printk("%s", tempbuf);

		printk("%s", tempbuf);
osl_delay(uint usec)
{
	d = MIN(usec, 1000);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	usleep_range(ms*1000, ms*1000 + 1000);

osl_sysuptime_us(void)
{
	do_gettimeofday(&tv);
	/* tv_usec content is a fraction of a second */
	usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec;
/*
 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access.
 * BINOSL selects the slightly slower function-call-based binary compatible osl.
 */

	get_random_bytes(&rand, sizeof(rand));
/* Linux Kernel: File Operations: start */
osl_os_open_image(char *filename)
{
	fp = filp_open(filename, O_RDONLY, 0);
	/*
	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
	 */

osl_os_get_image_block(char *buf, int len, void *image)
{
	struct file *fp = (struct file *)image;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
	rdlen = kernel_read(fp, buf, len, &fp->f_pos);
#else
	rdlen = kernel_read(fp, fp->f_pos, buf, len);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */

osl_os_close_image(void *image)
{
	filp_close((struct file *)image, NULL);

osl_os_image_size(void *image)
{
	int len = 0, curroffset;

	/* store the current offset */
	curroffset = generic_file_llseek(image, 0, 1);
	/* go to end of file to get length */
	len = generic_file_llseek(image, 0, 2);
	/* restore the offset */
	generic_file_llseek(image, curroffset, 0);

/* Linux Kernel: File Operations: end */
#if (defined(STB) && defined(__arm__))
inline void osl_pcie_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
{
	unsigned long flags = 0;

	int acp_war_enab = ACP_WAR_ENAB();

	if (osh && BUSTYPE(osh->bustype) == PCI_BUS)

	if (pci_access && acp_war_enab)
		spin_lock_irqsave(&l2x0_reg_lock, flags);

		*(volatile uint8 *)v = readb((volatile uint8 *)(addr));

	case sizeof(uint16):
		*(volatile uint16 *)v = readw((volatile uint16 *)(addr));

	case sizeof(uint32):
		*(volatile uint32 *)v = readl((volatile uint32 *)(addr));

	case sizeof(uint64):
		*(volatile uint64 *)v = *((volatile uint64 *)(addr));

	if (pci_access && acp_war_enab)
		spin_unlock_irqrestore(&l2x0_reg_lock, flags);
#if defined(BCM_BACKPLANE_TIMEOUT)
inline void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
{
	bool poll_timeout = FALSE;
	static int in_si_clear = FALSE;

		*(volatile uint8 *)v = readb((volatile uint8 *)(addr));
		if (*(volatile uint8 *)v == 0xff)
			poll_timeout = TRUE;

	case sizeof(uint16):
		*(volatile uint16 *)v = readw((volatile uint16 *)(addr));
		if (*(volatile uint16 *)v == 0xffff)
			poll_timeout = TRUE;

	case sizeof(uint32):
		*(volatile uint32 *)v = readl((volatile uint32 *)(addr));
		if (*(volatile uint32 *)v == 0xffffffff)
			poll_timeout = TRUE;

	case sizeof(uint64):
		*(volatile uint64 *)v = *((volatile uint64 *)(addr));
		if (*(volatile uint64 *)v == 0xffffffffffffffff)
			poll_timeout = TRUE;

	if (osh && osh->sih && (in_si_clear == FALSE) && poll_timeout && osh->bpt_cb) {

		osh->bpt_cb((void *)osh->sih, (void *)addr);
		in_si_clear = FALSE;

#endif /* BCM_BACKPLANE_TIMEOUT */
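
/*
 * BCM_SECURE_DMA support: the routines below manage a preallocated, physically
 * contiguous region set up in osl_attach() via osl_sec_dma_ioremap(). The region
 * is carved into fixed-size sec_mem_elem_t blocks kept on per-size free lists
 * (osl_sec_dma_init_elem_mem_block()/osl_sec_dma_alloc_mem_elem()), and packet
 * data is bounce-copied through those blocks around dma_map_page()/
 * dma_unmap_page() in osl_sec_dma_map()/osl_sec_dma_unmap().
 */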
#ifdef BCM_SECURE_DMA

osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr)
{
	size = PAGE_ALIGN(size);
	order = get_order(size);

	map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);

	for (i = 0; i < (size >> PAGE_SHIFT); i++)

	addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));

	osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));

#if defined(__ARM_ARCH_7A__)
	addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
		pgprot_noncached(__pgprot(PAGE_KERNEL)));

	osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));

	return (void *)addr;
osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
{
	vunmap(contig_base_va);
osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
{
	sec_mem_elem_t *sec_mem_elem;

	if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) {

		*list = sec_mem_elem;
		bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max));
		for (i = 0; i < max-1; i++) {
			sec_mem_elem->next = (sec_mem_elem + 1);
			sec_mem_elem->size = mbsize;
			sec_mem_elem->pa_cma = osh->contig_base_alloc;
			sec_mem_elem->vac = osh->contig_base_alloc_va;

			sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
			osh->contig_base_alloc += mbsize;
			osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);

			sec_mem_elem = sec_mem_elem + 1;
		}
		sec_mem_elem->next = NULL;
		sec_mem_elem->size = mbsize;
		sec_mem_elem->pa_cma = osh->contig_base_alloc;
		sec_mem_elem->vac = osh->contig_base_alloc_va;

		sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
		osh->contig_base_alloc += mbsize;
		osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize);

		printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);

osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
{
	kfree(sec_list_base);
static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
	struct sec_cma_info *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem = NULL;

#ifdef NOT_YET
	if (size <= 512 && osh->sec_list_512) {
		sec_mem_elem = osh->sec_list_512;
		osh->sec_list_512 = sec_mem_elem->next;
	}
	else if (size <= 2048 && osh->sec_list_2048) {
		sec_mem_elem = osh->sec_list_2048;
		osh->sec_list_2048 = sec_mem_elem->next;

	ASSERT(osh->sec_list_4096);
	sec_mem_elem = osh->sec_list_4096;
	osh->sec_list_4096 = sec_mem_elem->next;
#endif /* NOT_YET */

	sec_mem_elem->next = NULL;

	if (ptr_cma_info->sec_alloc_list_tail) {
		ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
	} else {
		/* First allocation: if tail is NULL, sec_alloc_list MUST also be NULL */
		ASSERT(ptr_cma_info->sec_alloc_list == NULL);
		ptr_cma_info->sec_alloc_list = sec_mem_elem;
		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
	}
	return sec_mem_elem;
}
static void BCMFASTPATH
osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem)
{
	sec_mem_elem->dma_handle = 0x0;
	sec_mem_elem->va = NULL;
#ifdef NOT_YET
	if (sec_mem_elem->size == 512) {
		sec_mem_elem->next = osh->sec_list_512;
		osh->sec_list_512 = sec_mem_elem;
	} else if (sec_mem_elem->size == 2048) {
		sec_mem_elem->next = osh->sec_list_2048;
		osh->sec_list_2048 = sec_mem_elem;
	} else if (sec_mem_elem->size == 4096) {
#endif /* NOT_YET */
	sec_mem_elem->next = osh->sec_list_4096;
	osh->sec_list_4096 = sec_mem_elem;

	printf("%s free failed size=%d\n", __FUNCTION__, sec_mem_elem->size);
#endif /* NOT_YET */
static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
{
	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
	sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;

	if (sec_mem_elem->dma_handle == dma_handle) {

		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;

		if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
			ptr_cma_info->sec_alloc_list_tail = NULL;
			ASSERT(ptr_cma_info->sec_alloc_list == NULL);

		return sec_mem_elem;

	sec_mem_elem = sec_mem_elem->next;

	while (sec_mem_elem != NULL) {

		if (sec_mem_elem->dma_handle == dma_handle) {

			sec_prv_elem->next = sec_mem_elem->next;
			if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail)
				ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;

			return sec_mem_elem;

		sec_prv_elem = sec_mem_elem;
		sec_mem_elem = sec_mem_elem->next;
static sec_mem_elem_t *
osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;

	ptr_cma_info->sec_alloc_list = sec_mem_elem->next;

	if (ptr_cma_info->sec_alloc_list == NULL)
		ptr_cma_info->sec_alloc_list_tail = NULL;

	return sec_mem_elem;

static void * BCMFASTPATH
osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
{
	return ptr_cma_info->sec_alloc_list_tail;
}
dma_addr_t BCMFASTPATH
osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;

	void *vaorig = ((uint8 *)va + size);
	dma_addr_t dma_handle = 0x0;
	/* packet will be the one added with osl_sec_dma_map() just before this call */

	sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);

	if (sec_mem_elem && sec_mem_elem->va == vaorig) {

		pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
		loffset = sec_mem_elem->pa_cma - (sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));

		dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
			(direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));

		printf("%s: error orig va not found va = 0x%p \n",
			__FUNCTION__, vaorig);
dma_addr_t BCMFASTPATH
osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
	hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem;
	struct page *pa_cma_page;
	void *pa_cma_kmap_va = NULL;

	dma_addr_t dma_handle = 0x0;

	struct sk_buff *skb;

#endif /* NOT_YET */

	ASSERT((direction == DMA_RX) || (direction == DMA_TX));
	sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);

	sec_mem_elem->va = va;
	sec_mem_elem->direction = direction;
	pa_cma_page = sec_mem_elem->pa_cma_page;

	loffset = sec_mem_elem->pa_cma - (sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
	/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
	 * pa_cma_kmap_va += loffset;
	 */

	pa_cma_kmap_va = sec_mem_elem->vac;
	pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset);

	if (direction == DMA_TX) {
		memcpy((uint8 *)pa_cma_kmap_va + offset, va, size);

		memcpy(pa_cma_kmap_va, va, size);
		/* prhex("Txpkt", pa_cma_kmap_va, size); */

		for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
			if (skb_is_nonlinear(skb)) {

				for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
					skb_frag_t *f = &skb_shinfo(skb)->frags[i];
					fragva = kmap_atomic(skb_frag_page(f));
					pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
					memcpy((pa_cma_kmap_va),
						(fragva + f->page_offset), skb_frag_size(f));
					kunmap_atomic(fragva);
					buflen += skb_frag_size(f);

				pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
				memcpy(pa_cma_kmap_va, skb->data, skb->len);

#endif /* NOT_YET */

		dmah->origsize = buflen;

	if ((p != NULL) && (dmah != NULL)) {

		dmah->origsize = buflen;

	*(uint32 *)(pa_cma_kmap_va) = 0x0;

	if (direction == DMA_RX) {
		flush_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));

	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset + offset, buflen,
		(direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));

		dmah->segs[0].addr = dma_handle;
		dmah->segs[0].length = buflen;

	sec_mem_elem->dma_handle = dma_handle;
	/* kunmap_atomic(pa_cma_kmap_va - loffset); */
dma_addr_t BCMFASTPATH
osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map)
{
	struct page *pa_cma_page;

	dma_addr_t dma_handle = 0x0;

	pa_cma = ((uint8 *)va - (uint8 *)osh->contig_delta_va_pa);
	pa_cma_page = phys_to_page(pa_cma);
	loffset = pa_cma - (pa_cma & ~(PAGE_SIZE-1));

	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
		(direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
	void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset)
{
	sec_mem_elem_t *sec_mem_elem;

	struct page *pa_cma_page;

	void *pa_cma_kmap_va = NULL;

	BCM_REFERENCE(buflen);
	BCM_REFERENCE(read_count);

	sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
	ASSERT(sec_mem_elem);

	va = sec_mem_elem->va;
	va = (uint8 *)va - offset;
	pa_cma = sec_mem_elem->pa_cma;

	pa_cma_page = sec_mem_elem->pa_cma_page;

	if (direction == DMA_RX) {

		/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
		 * pa_cma_kmap_va += loffset;
		 */

		pa_cma_kmap_va = sec_mem_elem->vac;

		invalidate_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));

		buflen = *(uint *)(pa_cma_kmap_va);

		} while (read_count < 200);
		dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE);
		memcpy(va, pa_cma_kmap_va, size);
		/* kunmap_atomic(pa_cma_kmap_va); */

		for (skb = (struct sk_buff *)p; (buflen < size) &&
			(skb != NULL); skb = skb->next) {
			if (skb_is_nonlinear(skb)) {
				pa_cma_kmap_va = kmap_atomic(pa_cma_page);
				for (i = 0; (buflen < size) &&
					(i < skb_shinfo(skb)->nr_frags); i++) {
					skb_frag_t *f = &skb_shinfo(skb)->frags[i];
					cpuaddr = kmap_atomic(skb_frag_page(f));
					pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
					memcpy((cpuaddr + f->page_offset),
						pa_cma_kmap_va, skb_frag_size(f));
					kunmap_atomic(cpuaddr);
					buflen += skb_frag_size(f);

				kunmap_atomic(pa_cma_kmap_va);

				pa_cma_kmap_va = kmap_atomic(pa_cma_page);
				pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
				memcpy(skb->data, pa_cma_kmap_va, skb->len);
				kunmap_atomic(pa_cma_kmap_va);

#endif /* NOT_YET */

	dma_unmap_page(OSH_NULL, pa_cma, size + offset, DMA_TO_DEVICE);

	osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
{
	sec_mem_elem_t *sec_mem_elem;

	sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);

	while (sec_mem_elem != NULL) {

		dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size,
			sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		osl_sec_dma_free_mem_elem(osh, sec_mem_elem);

		sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
	}
}
osl_sec_dma_init_consistent(osl_t *osh)
{
	void *temp_va = osh->contig_base_alloc_coherent_va;
	phys_addr_t temp_pa = osh->contig_base_alloc_coherent;

	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		osh->sec_cma_coherent[i].avail = TRUE;
		osh->sec_cma_coherent[i].va = temp_va;
		osh->sec_cma_coherent[i].pa = temp_pa;
		temp_va = ((uint8 *)temp_va) + SEC_CMA_COHERENT_BLK;
		temp_pa += SEC_CMA_COHERENT_BLK;
	}
}
osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap)
{
	void *temp_va = NULL;

	if (size > SEC_CMA_COHERENT_BLK) {
		printf("%s unsupported size\n", __FUNCTION__);

	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		if (osh->sec_cma_coherent[i].avail == TRUE) {
			temp_va = osh->sec_cma_coherent[i].va;
			temp_pa = osh->sec_cma_coherent[i].pa;
			osh->sec_cma_coherent[i].avail = FALSE;

	if (i == SEC_CMA_COHERENT_MAX)
		printf("%s: No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
			temp_va, (ulong)temp_pa, size);

	*pap = (unsigned long)temp_pa;
osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
		if (osh->sec_cma_coherent[i].va == va) {
			osh->sec_cma_coherent[i].avail = TRUE;

	if (i == SEC_CMA_COHERENT_MAX)
		printf("%s: Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
			va, (ulong)pa, size);

#endif /* BCM_SECURE_DMA */
/* Note: all timer APIs are not thread-safe; the caller must protect them with locks */

#ifdef REPORT_FATAL_TIMEOUTS
osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
{
	if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
		printk(KERN_ERR "osl_timer_init: out of memory, malloced %d bytes\n",
			(int)sizeof(osl_timer_t));

	bzero(t, sizeof(osl_timer_t));
	if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) {
		printf("osl_timer_init: malloc failed\n");
		MFREE(NULL, t, sizeof(osl_timer_t));

	t->timer->data = (ulong)arg;
	t->timer->function = (linux_timer_fn)fn;

	init_timer(t->timer);

osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
{
	printf("%s: Timer handle is NULL\n", __FUNCTION__);

	printf("Periodic timers are not supported by Linux timer APIs\n");

	t->timer->expires = jiffies + ms*HZ/1000;

	add_timer(t->timer);

osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
{
	printf("%s: Timer handle is NULL\n", __FUNCTION__);

	printf("Periodic timers are not supported by Linux timer APIs\n");

	t->timer->expires = jiffies + ms*HZ/1000;

	mod_timer(t->timer, t->timer->expires);

/*
 * Return TRUE if timer successfully deleted, FALSE if still pending
 */
osl_timer_del(osl_t *osh, osl_timer_t *t)
{
	printf("%s: Timer handle is NULL\n", __FUNCTION__);

	del_timer(t->timer);
	MFREE(NULL, t->timer, sizeof(struct timer_list));

	MFREE(NULL, t, sizeof(osl_timer_t));
osl_dma_lock(osl_t *osh)
{
	/* XXX: The conditional check avoids a scheduling bug: if spin_lock_bh()
	 * is used while spin_lock_irqsave() has already disabled interrupts,
	 * the kernel emits a warning, because spin_lock_bh() must not be used
	 * in a context where interrupts are disabled. See __local_bh_enable_ip()
	 * in kernel/softirq.c for the condition.
	 */
	if (likely(in_irq() || irqs_disabled())) {
		spin_lock(&osh->dma_lock);
	} else {
		spin_lock_bh(&osh->dma_lock);
		osh->dma_lock_bh = TRUE;
	}
}

osl_dma_unlock(osl_t *osh)
{
	if (unlikely(osh->dma_lock_bh)) {
		osh->dma_lock_bh = FALSE;
		spin_unlock_bh(&osh->dma_lock);
	} else {
		spin_unlock(&osh->dma_lock);
	}
}

osl_dma_lock_init(osl_t *osh)
{
	spin_lock_init(&osh->dma_lock);
	osh->dma_lock_bh = FALSE;
}
#endif /* USE_DMA_LOCK */