2 * Linux DHD Bus Module for PCIE
4 * Copyright (C) 1999-2019, Broadcom.
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
25 * <<Broadcom-WL-IPTag/Open:>>
27 * $Id: dhd_pcie_linux.c 797197 2018-12-29 03:31:21Z $
38 #if defined(DHD_DEBUG)
39 #include <hnd_armtrap.h>
41 #endif /* defined(DHD_DEBUG) */
42 #include <dngl_stats.h>
43 #include <pcie_core.h>
46 #include <dhd_proto.h>
49 #include <bcmmsgbuf.h>
52 #include <dhd_linux.h>
53 #ifdef CONFIG_ARCH_MSM
54 #if defined(CONFIG_PCI_MSM) || defined(CONFIG_ARCH_MSM8996)
55 #include <linux/msm_pcie.h>
57 #include <mach/msm_pcie.h>
58 #endif /* CONFIG_PCI_MSM */
59 #endif /* CONFIG_ARCH_MSM */
61 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
62 #include <linux/pm_runtime.h>
63 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
65 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
66 #ifndef AUTO_SUSPEND_TIMEOUT
67 #define AUTO_SUSPEND_TIMEOUT 1000
68 #endif /* AUTO_SUSPEND_TIMEOUT */
69 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
71 #include <linux/irq.h>
72 #ifdef USE_SMMU_ARCH_MSM
73 #include <asm/dma-iommu.h>
74 #include <linux/iommu.h>
76 #include <linux/platform_device.h>
77 #endif /* USE_SMMU_ARCH_MSM */
79 #define PCI_CFG_RETRY 10
80 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
81 #define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
83 #define OSL_PKTTAG_CLEAR(p) \
85 struct sk_buff *s = (struct sk_buff *)(p); \
86 ASSERT(OSL_PKTTAG_SZ == 32); \
87 *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
88 *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
89 *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
90 *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
93 /* user defined data structures */
95 typedef struct dhd_pc_res
{
100 } pci_config_res
, *pPci_config_res
;
/* Callback type: takes an opaque context pointer, returns TRUE/FALSE. */
102 typedef bool (*dhdpcie_cb_fn_t
)(void *);
104 typedef struct dhdpcie_info
108 struct pci_dev
*dev
; /* pci device handle */
109 volatile char *regs
; /* pci device memory va */
110 volatile char *tcm
; /* pci device memory va */
111 uint32 tcm_size
; /* pci device memory size */
112 struct pcos_info
*pcos_info
;
113 uint16 last_intrstatus
; /* to cache intrstatus */
116 struct pci_saved_state
* default_state
;
117 struct pci_saved_state
* state
;
118 #ifdef BCMPCIE_OOB_HOST_WAKE
119 void *os_cxt
; /* Pointer to per-OS private data */
120 #endif /* BCMPCIE_OOB_HOST_WAKE */
121 #ifdef DHD_WAKE_STATUS
122 spinlock_t pcie_lock
;
123 unsigned int total_wake_count
;
126 #endif /* DHD_WAKE_STATUS */
127 #ifdef USE_SMMU_ARCH_MSM
129 #endif /* USE_SMMU_ARCH_MSM */
135 wait_queue_head_t intr_wait_queue
;
136 struct timer_list tuning_timer
;
137 int tuning_timer_exp
;
139 struct tasklet_struct tuning_tasklet
;
142 #ifdef BCMPCIE_OOB_HOST_WAKE
143 typedef struct dhdpcie_os_info
{
144 int oob_irq_num
; /* valid when hardware or software oob in use */
145 unsigned long oob_irq_flags
; /* valid when hardware or software oob in use */
146 bool oob_irq_registered
;
147 bool oob_irq_enabled
;
148 bool oob_irq_wake_enabled
;
149 spinlock_t oob_irq_spinlock
;
150 void *dev
; /* handle to the underlying device */
152 static irqreturn_t
wlan_oob_irq(int irq
, void *data
);
153 #endif /* BCMPCIE_OOB_HOST_WAKE */
155 #ifdef USE_SMMU_ARCH_MSM
/* Per-device SMMU (IOMMU) state used on MSM platforms; populated by
 * dhdpcie_smmu_init() and torn down by dhdpcie_smmu_remove().
 */
156 typedef struct dhdpcie_smmu_info
{
/* ARM IOMMU mapping attached to the PCIe device; NULL once detached. */
157 struct dma_iommu_mapping
*smmu_mapping
;
/* Start of the IOVA window, read from the "wlan-smmu-iova-address" DT property. */
158 dma_addr_t smmu_iova_start
;
/* Length of the IOVA window; dhdpcie_smmu_init() rejects values <= 0. */
159 size_t smmu_iova_len
;
160 } dhdpcie_smmu_info_t
;
161 #endif /* USE_SMMU_ARCH_MSM */
163 /* function declarations */
165 dhdpcie_pci_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
);
166 static void __devexit
167 dhdpcie_pci_remove(struct pci_dev
*pdev
);
168 static int dhdpcie_init(struct pci_dev
*pdev
);
169 static irqreturn_t
dhdpcie_isr(int irq
, void *arg
);
170 /* OS Routine functions for PCI suspend/resume */
172 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
173 static int dhdpcie_set_suspend_resume(struct pci_dev
*dev
, bool state
, bool byint
);
175 static int dhdpcie_set_suspend_resume(dhd_bus_t
*bus
, bool state
);
176 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
177 static int dhdpcie_resume_host_dev(dhd_bus_t
*bus
);
178 static int dhdpcie_suspend_host_dev(dhd_bus_t
*bus
);
179 static int dhdpcie_resume_dev(struct pci_dev
*dev
);
180 static int dhdpcie_suspend_dev(struct pci_dev
*dev
);
181 #ifdef DHD_PCIE_RUNTIMEPM
182 static int dhdpcie_pm_suspend(struct device
*dev
);
183 static int dhdpcie_pm_prepare(struct device
*dev
);
184 static int dhdpcie_pm_resume(struct device
*dev
);
185 static void dhdpcie_pm_complete(struct device
*dev
);
187 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
188 static int dhdpcie_pm_system_suspend_noirq(struct device
* dev
);
189 static int dhdpcie_pm_system_resume_noirq(struct device
* dev
);
191 static int dhdpcie_pci_suspend(struct pci_dev
*dev
, pm_message_t state
);
192 static int dhdpcie_pci_resume(struct pci_dev
*dev
);
193 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
194 #endif /* DHD_PCIE_RUNTIMEPM */
196 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
197 static int dhdpcie_pm_runtime_suspend(struct device
* dev
);
198 static int dhdpcie_pm_runtime_resume(struct device
* dev
);
199 static int dhdpcie_pm_system_suspend_noirq(struct device
* dev
);
200 static int dhdpcie_pm_system_resume_noirq(struct device
* dev
);
201 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
203 static struct pci_device_id dhdpcie_pci_devid
[] __devinitdata
= {
206 subvendor
: PCI_ANY_ID
,
207 subdevice
: PCI_ANY_ID
,
208 class: PCI_CLASS_NETWORK_OTHER
<< 8,
209 class_mask
: 0xffff00,
212 { 0, 0, 0, 0, 0, 0, 0}
/* Export the PCI ID table so the module loader can match supported devices. */
214 MODULE_DEVICE_TABLE(pci
, dhdpcie_pci_devid
);
216 /* Power Management Hooks */
217 #ifdef DHD_PCIE_RUNTIMEPM
218 static const struct dev_pm_ops dhd_pcie_pm_ops
= {
219 .prepare
= dhdpcie_pm_prepare
,
220 .suspend
= dhdpcie_pm_suspend
,
221 .resume
= dhdpcie_pm_resume
,
222 .complete
= dhdpcie_pm_complete
,
224 #endif /* DHD_PCIE_RUNTIMEPM */
225 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
226 static const struct dev_pm_ops dhdpcie_pm_ops
= {
227 SET_RUNTIME_PM_OPS(dhdpcie_pm_runtime_suspend
, dhdpcie_pm_runtime_resume
, NULL
)
228 .suspend_noirq
= dhdpcie_pm_system_suspend_noirq
,
229 .resume_noirq
= dhdpcie_pm_system_resume_noirq
231 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
233 static struct pci_driver dhdpcie_driver
= {
234 node
: {&dhdpcie_driver
.node
, &dhdpcie_driver
.node
},
236 id_table
: dhdpcie_pci_devid
,
237 probe
: dhdpcie_pci_probe
,
238 remove
: dhdpcie_pci_remove
,
239 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
242 #if defined(DHD_PCIE_RUNTIMEPM) || defined(DHD_PCIE_NATIVE_RUNTIMEPM)
243 .driver
.pm
= &dhd_pcie_pm_ops
,
245 suspend
: dhdpcie_pci_suspend
,
246 resume
: dhdpcie_pci_resume
,
247 #endif /* DHD_PCIE_RUNTIMEPM || DHD_PCIE_NATIVE_RUNTIMEPM */
/* Tracks whether PCIe enumeration/attach completed — presumably set by
 * dhdpcie_init(); TODO confirm. Checked in dhdpcie_bus_register() and
 * cleared in dhdpcie_pci_remove().
 */
250 int dhdpcie_init_succeeded
= FALSE
;
252 #ifdef USE_SMMU_ARCH_MSM
253 static int dhdpcie_smmu_init(struct pci_dev
*pdev
, void *smmu_cxt
)
255 struct dma_iommu_mapping
*mapping
;
256 struct device_node
*root_node
= NULL
;
257 dhdpcie_smmu_info_t
*smmu_info
= (dhdpcie_smmu_info_t
*)smmu_cxt
;
258 int smmu_iova_address
[2];
259 char *wlan_node
= "android,bcmdhd_wlan";
260 char *wlan_smmu_node
= "wlan-smmu-iova-address";
265 DHD_ERROR(("%s: SMMU initialize\n", __FUNCTION__
));
267 root_node
= of_find_compatible_node(NULL
, NULL
, wlan_node
);
269 WARN(1, "failed to get device node of BRCM WLAN\n");
273 if (of_property_read_u32_array(root_node
, wlan_smmu_node
,
274 smmu_iova_address
, 2) == 0) {
275 DHD_ERROR(("%s : get SMMU start address 0x%x, size 0x%x\n",
276 __FUNCTION__
, smmu_iova_address
[0], smmu_iova_address
[1]));
277 smmu_info
->smmu_iova_start
= smmu_iova_address
[0];
278 smmu_info
->smmu_iova_len
= smmu_iova_address
[1];
280 printf("%s : can't get smmu iova address property\n",
285 if (smmu_info
->smmu_iova_len
<= 0) {
286 DHD_ERROR(("%s: Invalid smmu iova len %d\n",
287 __FUNCTION__
, (int)smmu_info
->smmu_iova_len
));
291 DHD_ERROR(("%s : SMMU init start\n", __FUNCTION__
));
293 if (pci_set_dma_mask(pdev
, DMA_BIT_MASK(64)) ||
294 pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64))) {
295 DHD_ERROR(("%s: DMA set 64bit mask failed.\n", __FUNCTION__
));
299 mapping
= arm_iommu_create_mapping(&platform_bus_type
,
300 smmu_info
->smmu_iova_start
, smmu_info
->smmu_iova_len
);
301 if (IS_ERR(mapping
)) {
302 DHD_ERROR(("%s: create mapping failed, err = %d\n",
304 ret
= PTR_ERR(mapping
);
308 ret
= iommu_domain_set_attr(mapping
->domain
,
309 DOMAIN_ATTR_ATOMIC
, &atomic_ctx
);
311 DHD_ERROR(("%s: set atomic_ctx attribute failed, err = %d\n",
316 ret
= iommu_domain_set_attr(mapping
->domain
,
317 DOMAIN_ATTR_S1_BYPASS
, &s1_bypass
);
319 DHD_ERROR(("%s: set s1_bypass attribute failed, err = %d\n",
324 ret
= arm_iommu_attach_device(&pdev
->dev
, mapping
);
326 DHD_ERROR(("%s: attach device failed, err = %d\n",
331 smmu_info
->smmu_mapping
= mapping
;
337 arm_iommu_release_mapping(mapping
);
342 static void dhdpcie_smmu_remove(struct pci_dev
*pdev
, void *smmu_cxt
)
344 dhdpcie_smmu_info_t
*smmu_info
;
350 smmu_info
= (dhdpcie_smmu_info_t
*)smmu_cxt
;
351 if (smmu_info
->smmu_mapping
) {
352 arm_iommu_detach_device(&pdev
->dev
);
353 arm_iommu_release_mapping(smmu_info
->smmu_mapping
);
354 smmu_info
->smmu_mapping
= NULL
;
357 #endif /* USE_SMMU_ARCH_MSM */
360 dhd_bus_aer_config(dhd_bus_t
*bus
)
364 DHD_ERROR(("%s: Configure AER registers for EP\n", __FUNCTION__
));
365 val
= dhdpcie_ep_access_cap(bus
, PCIE_ADVERRREP_CAPID
,
366 PCIE_ADV_CORR_ERR_MASK_OFFSET
, TRUE
, FALSE
, 0);
367 if (val
!= (uint32
)-1) {
369 dhdpcie_ep_access_cap(bus
, PCIE_ADVERRREP_CAPID
,
370 PCIE_ADV_CORR_ERR_MASK_OFFSET
, TRUE
, TRUE
, val
);
372 DHD_ERROR(("%s: Invalid EP's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
376 DHD_ERROR(("%s: Configure AER registers for RC\n", __FUNCTION__
));
377 val
= dhdpcie_rc_access_cap(bus
, PCIE_ADVERRREP_CAPID
,
378 PCIE_ADV_CORR_ERR_MASK_OFFSET
, TRUE
, FALSE
, 0);
379 if (val
!= (uint32
)-1) {
381 dhdpcie_rc_access_cap(bus
, PCIE_ADVERRREP_CAPID
,
382 PCIE_ADV_CORR_ERR_MASK_OFFSET
, TRUE
, TRUE
, val
);
384 DHD_ERROR(("%s: Invalid RC's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
389 #ifdef DHD_PCIE_RUNTIMEPM
390 static int dhdpcie_pm_suspend(struct device
*dev
)
393 struct pci_dev
*pdev
= to_pci_dev(dev
);
394 dhdpcie_info_t
*pch
= pci_get_drvdata(pdev
);
395 dhd_bus_t
*bus
= NULL
;
405 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
406 if (!DHD_BUS_BUSY_CHECK_IDLE(bus
->dhd
)) {
407 DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
408 __FUNCTION__
, bus
->dhd
->dhd_bus_busy_state
));
409 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
412 DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus
->dhd
);
413 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
415 if (!bus
->dhd
->dongle_reset
)
416 ret
= dhdpcie_set_suspend_resume(bus
, TRUE
);
418 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
419 DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus
->dhd
);
420 dhd_os_busbusy_wake(bus
->dhd
);
421 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
427 static int dhdpcie_pm_prepare(struct device
*dev
)
429 struct pci_dev
*pdev
= to_pci_dev(dev
);
430 dhdpcie_info_t
*pch
= pci_get_drvdata(pdev
);
431 dhd_bus_t
*bus
= NULL
;
433 if (!pch
|| !pch
->bus
) {
438 DHD_DISABLE_RUNTIME_PM(bus
->dhd
);
444 static int dhdpcie_pm_resume(struct device
*dev
)
447 struct pci_dev
*pdev
= to_pci_dev(dev
);
448 dhdpcie_info_t
*pch
= pci_get_drvdata(pdev
);
449 dhd_bus_t
*bus
= NULL
;
459 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
460 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus
->dhd
);
461 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
463 if (!bus
->dhd
->dongle_reset
)
464 ret
= dhdpcie_set_suspend_resume(bus
, FALSE
);
466 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
467 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus
->dhd
);
468 dhd_os_busbusy_wake(bus
->dhd
);
469 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
474 static void dhdpcie_pm_complete(struct device
*dev
)
476 struct pci_dev
*pdev
= to_pci_dev(dev
);
477 dhdpcie_info_t
*pch
= pci_get_drvdata(pdev
);
478 dhd_bus_t
*bus
= NULL
;
480 if (!pch
|| !pch
->bus
) {
485 DHD_ENABLE_RUNTIME_PM(bus
->dhd
);
491 static int dhdpcie_pci_suspend(struct pci_dev
* pdev
, pm_message_t state
)
494 dhdpcie_info_t
*pch
= pci_get_drvdata(pdev
);
495 dhd_bus_t
*bus
= NULL
;
505 BCM_REFERENCE(state
);
507 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
508 if (!DHD_BUS_BUSY_CHECK_IDLE(bus
->dhd
)) {
509 DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
510 __FUNCTION__
, bus
->dhd
->dhd_bus_busy_state
));
511 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
514 DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus
->dhd
);
515 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
517 if (!bus
->dhd
->dongle_reset
)
518 ret
= dhdpcie_set_suspend_resume(bus
, TRUE
);
520 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
521 DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus
->dhd
);
522 dhd_os_busbusy_wake(bus
->dhd
);
523 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
528 static int dhdpcie_pci_resume(struct pci_dev
*pdev
)
531 dhdpcie_info_t
*pch
= pci_get_drvdata(pdev
);
532 dhd_bus_t
*bus
= NULL
;
542 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
543 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus
->dhd
);
544 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
546 if (!bus
->dhd
->dongle_reset
)
547 ret
= dhdpcie_set_suspend_resume(bus
, FALSE
);
549 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
550 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus
->dhd
);
551 dhd_os_busbusy_wake(bus
->dhd
);
552 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
557 #endif /* DHD_PCIE_RUNTIMEPM */
558 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
559 static int dhdpcie_set_suspend_resume(dhd_bus_t
*bus
, bool state
, bool byint
)
561 static int dhdpcie_set_suspend_resume(dhd_bus_t
*bus
, bool state
)
562 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
566 ASSERT(bus
&& !bus
->dhd
->dongle_reset
);
568 #ifdef DHD_PCIE_RUNTIMEPM
569 /* if wakelock is held during suspend, return failed */
570 if (state
== TRUE
&& dhd_os_check_wakelock_all(bus
->dhd
)) {
573 mutex_lock(&bus
->pm_lock
);
574 #endif /* DHD_PCIE_RUNTIMEPM */
576 /* When firmware is not loaded do the PCI bus */
577 /* suspend/resume only */
578 if (bus
->dhd
->busstate
== DHD_BUS_DOWN
) {
579 ret
= dhdpcie_pci_suspend_resume(bus
, state
);
580 #ifdef DHD_PCIE_RUNTIMEPM
581 mutex_unlock(&bus
->pm_lock
);
582 #endif /* DHD_PCIE_RUNTIMEPM */
585 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
586 ret
= dhdpcie_bus_suspend(bus
, state
, byint
);
588 ret
= dhdpcie_bus_suspend(bus
, state
);
589 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
591 #ifdef DHD_PCIE_RUNTIMEPM
592 mutex_unlock(&bus
->pm_lock
);
593 #endif /* DHD_PCIE_RUNTIMEPM */
598 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
599 static int dhdpcie_pm_runtime_suspend(struct device
* dev
)
601 struct pci_dev
*pdev
= to_pci_dev(dev
);
602 dhdpcie_info_t
*pch
= pci_get_drvdata(pdev
);
603 dhd_bus_t
*bus
= NULL
;
611 DHD_RPM(("%s Enter\n", __FUNCTION__
));
613 if (atomic_read(&bus
->dhd
->block_bus
))
616 dhd_netif_stop_queue(bus
);
617 atomic_set(&bus
->dhd
->block_bus
, TRUE
);
619 if (dhdpcie_set_suspend_resume(pdev
, TRUE
, TRUE
)) {
620 pm_runtime_mark_last_busy(dev
);
624 atomic_set(&bus
->dhd
->block_bus
, FALSE
);
625 dhd_bus_start_queue(bus
);
630 static int dhdpcie_pm_runtime_resume(struct device
* dev
)
632 struct pci_dev
*pdev
= to_pci_dev(dev
);
633 dhdpcie_info_t
*pch
= pci_get_drvdata(pdev
);
634 dhd_bus_t
*bus
= pch
->bus
;
636 DHD_RPM(("%s Enter\n", __FUNCTION__
));
638 if (atomic_read(&bus
->dhd
->block_bus
))
641 if (dhdpcie_set_suspend_resume(pdev
, FALSE
, TRUE
))
647 static int dhdpcie_pm_system_suspend_noirq(struct device
* dev
)
649 struct pci_dev
*pdev
= to_pci_dev(dev
);
650 dhdpcie_info_t
*pch
= pci_get_drvdata(pdev
);
651 dhd_bus_t
*bus
= NULL
;
654 DHD_RPM(("%s Enter\n", __FUNCTION__
));
661 if (atomic_read(&bus
->dhd
->block_bus
))
664 dhd_netif_stop_queue(bus
);
665 atomic_set(&bus
->dhd
->block_bus
, TRUE
);
667 ret
= dhdpcie_set_suspend_resume(pdev
, TRUE
, FALSE
);
670 dhd_bus_start_queue(bus
);
671 atomic_set(&bus
->dhd
->block_bus
, FALSE
);
677 static int dhdpcie_pm_system_resume_noirq(struct device
* dev
)
679 struct pci_dev
*pdev
= to_pci_dev(dev
);
680 dhdpcie_info_t
*pch
= pci_get_drvdata(pdev
);
681 dhd_bus_t
*bus
= NULL
;
689 DHD_RPM(("%s Enter\n", __FUNCTION__
));
691 ret
= dhdpcie_set_suspend_resume(pdev
, FALSE
, FALSE
);
693 atomic_set(&bus
->dhd
->block_bus
, FALSE
);
694 dhd_bus_start_queue(bus
);
695 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus
));
699 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
701 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
/* Defined elsewhere in the driver (likely dhd_linux.c — TODO confirm);
 * called from dhdpcie_suspend_dev() to stop the DPC tasklet before suspend.
 */
702 extern void dhd_dpc_tasklet_kill(dhd_pub_t
*dhdp
);
703 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
705 static int dhdpcie_suspend_dev(struct pci_dev
*dev
)
708 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
709 dhdpcie_info_t
*pch
= pci_get_drvdata(dev
);
710 dhd_bus_t
*bus
= pch
->bus
;
712 if (bus
->is_linkdown
) {
713 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__
));
716 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
717 DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__
));
718 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
719 dhd_dpc_tasklet_kill(bus
->dhd
);
720 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
722 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
723 pch
->state
= pci_store_saved_state(dev
);
724 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
725 pci_enable_wake(dev
, PCI_D0
, TRUE
);
726 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
727 if (pci_is_enabled(dev
))
729 pci_disable_device(dev
);
731 ret
= pci_set_power_state(dev
, PCI_D3hot
);
733 DHD_ERROR(("%s: pci_set_power_state error %d\n",
736 dev
->state_saved
= FALSE
;
740 #ifdef DHD_WAKE_STATUS
741 int bcmpcie_get_total_wake(struct dhd_bus
*bus
)
743 dhdpcie_info_t
*pch
= pci_get_drvdata(bus
->dev
);
745 return pch
->total_wake_count
;
748 int bcmpcie_set_get_wake(struct dhd_bus
*bus
, int flag
)
750 dhdpcie_info_t
*pch
= pci_get_drvdata(bus
->dev
);
754 spin_lock_irqsave(&pch
->pcie_lock
, flags
);
757 pch
->total_wake_count
+= flag
;
758 pch
->pkt_wake
= flag
;
760 spin_unlock_irqrestore(&pch
->pcie_lock
, flags
);
763 #endif /* DHD_WAKE_STATUS */
765 static int dhdpcie_resume_dev(struct pci_dev
*dev
)
768 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
769 dhdpcie_info_t
*pch
= pci_get_drvdata(dev
);
770 pci_load_and_free_saved_state(dev
, &pch
->state
);
771 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
772 DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__
));
773 dev
->state_saved
= TRUE
;
774 pci_restore_state(dev
);
775 err
= pci_enable_device(dev
);
777 printf("%s:pci_enable_device error %d \n", __FUNCTION__
, err
);
781 err
= pci_set_power_state(dev
, PCI_D0
);
783 printf("%s:pci_set_power_state error %d \n", __FUNCTION__
, err
);
791 static int dhdpcie_resume_host_dev(dhd_bus_t
*bus
)
794 #ifdef USE_EXYNOS_PCIE_RC_PMPATCH
795 bcmerror
= exynos_pcie_pm_resume(SAMSUNG_PCIE_CH_NUM
);
796 #endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
797 #ifdef CONFIG_ARCH_MSM
798 bcmerror
= dhdpcie_start_host_pcieclock(bus
);
799 #endif /* CONFIG_ARCH_MSM */
800 #ifdef CONFIG_ARCH_TEGRA
801 bcmerror
= tegra_pcie_pm_resume();
802 #endif /* CONFIG_ARCH_TEGRA */
804 DHD_ERROR(("%s: PCIe RC resume failed!!! (%d)\n",
805 __FUNCTION__
, bcmerror
));
806 bus
->is_linkdown
= 1;
807 #ifdef SUPPORT_LINKDOWN_RECOVERY
808 #ifdef CONFIG_ARCH_MSM
809 bus
->no_cfg_restore
= 1;
810 #endif /* CONFIG_ARCH_MSM */
811 #endif /* SUPPORT_LINKDOWN_RECOVERY */
817 static int dhdpcie_suspend_host_dev(dhd_bus_t
*bus
)
820 #ifdef USE_EXYNOS_PCIE_RC_PMPATCH
822 pci_save_state(bus
->rc_dev
);
824 DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
825 __FUNCTION__
, PCIE_RC_VENDOR_ID
, PCIE_RC_DEVICE_ID
));
827 exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM
);
828 #endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
829 #ifdef CONFIG_ARCH_MSM
830 bcmerror
= dhdpcie_stop_host_pcieclock(bus
);
831 #endif /* CONFIG_ARCH_MSM */
832 #ifdef CONFIG_ARCH_TEGRA
833 bcmerror
= tegra_pcie_pm_suspend();
834 #endif /* CONFIG_ARCH_TEGRA */
839 dhdpcie_rc_config_read(dhd_bus_t
*bus
, uint offset
)
841 uint val
= -1; /* Initialise to 0xfffffff */
843 pci_read_config_dword(bus
->rc_dev
, offset
, &val
);
846 DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
847 __FUNCTION__
, PCIE_RC_VENDOR_ID
, PCIE_RC_DEVICE_ID
));
849 DHD_ERROR(("%s: RC %x:%x offset 0x%x val 0x%x\n",
850 __FUNCTION__
, PCIE_RC_VENDOR_ID
, PCIE_RC_DEVICE_ID
, offset
, val
));
855 * Reads/ Writes the value of capability register
856 * from the given CAP_ID section of PCI Root Port
859 * @bus current dhd_bus_t pointer
860 * @cap Capability or Extended Capability ID to get
861 * @offset offset of Register to Read
862 * @is_ext TRUE if @cap is given for Extended Capability
863 * @is_write is set to TRUE to indicate write
864 * @val value to write
867 * Returns 0xffffffff on error
868 * on write success returns BCME_OK (0)
869 * on Read Success returns the value of register requested
870 * Note: caller shoud ensure valid capability ID and Ext. Capability ID.
874 dhdpcie_access_cap(struct pci_dev
*pdev
, int cap
, uint offset
, bool is_ext
, bool is_write
,
882 DHD_ERROR(("%s: pdev is NULL\n", __FUNCTION__
));
886 /* Find Capability offset */
888 /* removing max EXT_CAP_ID check as
889 * linux kernel definition's max value is not upadted yet as per spec
891 cap_ptr
= pci_find_ext_capability(pdev
, cap
);
894 /* removing max PCI_CAP_ID_MAX check as
895 * pervious kernel versions dont have this definition
897 cap_ptr
= pci_find_capability(pdev
, cap
);
900 /* Return if capability with given ID not found */
902 DHD_ERROR(("%s: PCI Cap(0x%02x) not supported.\n",
908 pci_write_config_dword(pdev
, (cap_ptr
+ offset
), writeval
);
913 pci_read_config_dword(pdev
, (cap_ptr
+ offset
), &readval
);
921 dhdpcie_rc_access_cap(dhd_bus_t
*bus
, int cap
, uint offset
, bool is_ext
, bool is_write
,
924 if (!(bus
->rc_dev
)) {
925 DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
926 __FUNCTION__
, PCIE_RC_VENDOR_ID
, PCIE_RC_DEVICE_ID
));
930 return dhdpcie_access_cap(bus
->rc_dev
, cap
, offset
, is_ext
, is_write
, writeval
);
934 dhdpcie_ep_access_cap(dhd_bus_t
*bus
, int cap
, uint offset
, bool is_ext
, bool is_write
,
938 DHD_ERROR(("%s: EP handle is NULL\n", __FUNCTION__
));
942 return dhdpcie_access_cap(bus
->dev
, cap
, offset
, is_ext
, is_write
, writeval
);
945 /* API wrapper to read Root Port link capability
946 * Returns 2 = GEN2 1 = GEN1 BCME_ERR on linkcap not found
949 uint32
dhd_debug_get_rc_linkcap(dhd_bus_t
*bus
)
952 linkcap
= dhdpcie_rc_access_cap(bus
, PCIE_CAP_ID_EXP
,
953 PCIE_CAP_LINKCAP_OFFSET
, FALSE
, FALSE
, 0);
954 linkcap
&= PCIE_CAP_LINKCAP_LNKSPEED_MASK
;
958 int dhdpcie_pci_suspend_resume(dhd_bus_t
*bus
, bool state
)
962 struct pci_dev
*dev
= bus
->dev
;
965 #if !defined(BCMPCIE_OOB_HOST_WAKE)
966 dhdpcie_pme_active(bus
->osh
, state
);
968 rc
= dhdpcie_suspend_dev(dev
);
970 dhdpcie_suspend_host_dev(bus
);
973 rc
= dhdpcie_resume_host_dev(bus
);
975 rc
= dhdpcie_resume_dev(dev
);
976 if (MULTIBP_ENAB(bus
->sih
) && (bus
->sih
->buscorerev
>= 66)) {
977 /* reinit CTO configuration
978 * because cfg space got reset at D3 (PERST)
980 dhdpcie_cto_init(bus
, bus
->cto_enable
);
982 if (bus
->sih
->buscorerev
== 66) {
983 dhdpcie_ssreset_dis_enum_rst(bus
);
985 #if !defined(BCMPCIE_OOB_HOST_WAKE)
986 dhdpcie_pme_active(bus
->osh
, state
);
989 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
990 #if defined(DHD_HANG_SEND_UP_TEST)
991 if (bus
->is_linkdown
||
992 bus
->dhd
->req_hang_type
== HANG_REASON_PCIE_RC_LINK_UP_FAIL
) {
993 #else /* DHD_HANG_SEND_UP_TEST */
994 if (bus
->is_linkdown
) {
995 #endif /* DHD_HANG_SEND_UP_TEST */
996 bus
->dhd
->hang_reason
= HANG_REASON_PCIE_RC_LINK_UP_FAIL
;
997 dhd_os_send_hang_message(bus
->dhd
);
1004 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
1005 static int dhdpcie_device_scan(struct device
*dev
, void *data
)
1007 struct pci_dev
*pcidev
;
1010 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1011 #pragma GCC diagnostic push
1012 #pragma GCC diagnostic ignored "-Wcast-qual"
1014 pcidev
= container_of(dev
, struct pci_dev
, dev
);
1015 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1016 #pragma GCC diagnostic pop
1018 if (pcidev
->vendor
!= 0x14e4)
1021 DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev
->device
));
1023 if (pcidev
->driver
&& strcmp(pcidev
->driver
->name
, dhdpcie_driver
.name
))
1024 DHD_ERROR(("Broadcom PCI Device 0x%04x has allocated with driver %s\n",
1025 pcidev
->device
, pcidev
->driver
->name
));
1029 #endif /* LINUX_VERSION >= 2.6.0 */
1032 dhdpcie_bus_register(void)
1036 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
1037 if (!(error
= pci_module_init(&dhdpcie_driver
)))
1040 DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__
, error
));
1042 if (!(error
= pci_register_driver(&dhdpcie_driver
))) {
1043 bus_for_each_dev(dhdpcie_driver
.driver
.bus
, NULL
, &error
, dhdpcie_device_scan
);
1045 DHD_ERROR(("No Broadcom PCI device enumerated!\n"));
1046 } else if (!dhdpcie_init_succeeded
) {
1047 DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__
));
1052 pci_unregister_driver(&dhdpcie_driver
);
1055 #endif /* LINUX_VERSION < 2.6.0 */
1061 dhdpcie_bus_unregister(void)
1063 pci_unregister_driver(&dhdpcie_driver
);
1067 dhdpcie_pci_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
1070 if (dhdpcie_chipmatch (pdev
->vendor
, pdev
->device
)) {
1071 DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__
));
1074 printf("PCI_PROBE: bus %X, slot %X,vendor %X, device %X"
1075 "(good PCI location)\n", pdev
->bus
->number
,
1076 PCI_SLOT(pdev
->devfn
), pdev
->vendor
, pdev
->device
);
1078 if (dhdpcie_init (pdev
)) {
1079 DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__
));
1083 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1085 Since MSM PCIe RC dev usage conunt already incremented +2 even
1086 before dhdpcie_pci_probe() called, then we inevitably to call
1087 pm_runtime_put_noidle() two times to make the count start with zero.
1090 pm_runtime_put_noidle(&pdev
->dev
);
1091 pm_runtime_put_noidle(&pdev
->dev
);
1092 pm_runtime_set_suspended(&pdev
->dev
);
1093 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1095 #ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND
1096 /* disable async suspend */
1097 device_disable_async_suspend(&pdev
->dev
);
1098 #endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */
1100 DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__
));
1105 dhdpcie_detach(dhdpcie_info_t
*pch
)
1108 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1109 if (!dhd_download_fw_on_driverload
) {
1110 pci_load_and_free_saved_state(pch
->dev
, &pch
->default_state
);
1112 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1113 MFREE(pch
->osh
, pch
, sizeof(dhdpcie_info_t
));
1119 dhdpcie_pci_remove(struct pci_dev
*pdev
)
1122 dhdpcie_info_t
*pch
= NULL
;
1123 dhd_bus_t
*bus
= NULL
;
1125 DHD_TRACE(("%s Enter\n", __FUNCTION__
));
1126 pch
= pci_get_drvdata(pdev
);
1130 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1131 pm_runtime_get_noresume(&pdev
->dev
);
1132 pm_runtime_get_noresume(&pdev
->dev
);
1133 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1136 #ifdef SUPPORT_LINKDOWN_RECOVERY
1137 #ifdef CONFIG_ARCH_MSM
1138 msm_pcie_deregister_event(&bus
->pcie_event
);
1139 #endif /* CONFIG_ARCH_MSM */
1140 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1141 #if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
1142 defined(CONFIG_SOC_EXYNOS9810)
1143 exynos_pcie_deregister_event(&bus
->pcie_event
);
1144 #endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 ||
1145 * CONFIG_SOC_EXYNOS9810
1147 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1148 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1152 dhdpcie_bus_release(bus
);
1154 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
1155 if (pci_is_enabled(pdev
))
1157 pci_disable_device(pdev
);
1158 #ifdef BCMPCIE_OOB_HOST_WAKE
1159 /* pcie os info detach */
1160 MFREE(osh
, pch
->os_cxt
, sizeof(dhdpcie_os_info_t
));
1161 #endif /* BCMPCIE_OOB_HOST_WAKE */
1162 #ifdef USE_SMMU_ARCH_MSM
1163 /* smmu info detach */
1164 dhdpcie_smmu_remove(pdev
, pch
->smmu_cxt
);
1165 MFREE(osh
, pch
->smmu_cxt
, sizeof(dhdpcie_smmu_info_t
));
1166 #endif /* USE_SMMU_ARCH_MSM */
1167 /* pcie info detach */
1168 dhdpcie_detach(pch
);
1172 dhdpcie_init_succeeded
= FALSE
;
1174 DHD_TRACE(("%s Exit\n", __FUNCTION__
));
1179 /* Enable Linux Msi */
1181 dhdpcie_enable_msi(struct pci_dev
*pdev
, unsigned int min_vecs
, unsigned int max_vecs
)
1183 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
1184 return pci_alloc_irq_vectors(pdev
, min_vecs
, max_vecs
, PCI_IRQ_MSI
);
1185 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
1186 return pci_enable_msi_range(pdev
, min_vecs
, max_vecs
);
1188 return pci_enable_msi_block(pdev
, max_vecs
);
1192 /* Disable Linux Msi */
1194 dhdpcie_disable_msi(struct pci_dev
*pdev
)
1196 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
1197 pci_free_irq_vectors(pdev
);
1198 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
1199 pci_disable_msi(pdev
);
1201 pci_disable_msi(pdev
);
1206 /* Request Linux irq */
1208 dhdpcie_request_irq(dhdpcie_info_t
*dhdpcie_info
)
1210 dhd_bus_t
*bus
= dhdpcie_info
->bus
;
1211 struct pci_dev
*pdev
= dhdpcie_info
->bus
->dev
;
1212 int host_irq_disabled
;
1214 if (!bus
->irq_registered
) {
1215 snprintf(dhdpcie_info
->pciname
, sizeof(dhdpcie_info
->pciname
),
1216 "dhdpcie:%s", pci_name(pdev
));
1218 if (bus
->d2h_intr_method
== PCIE_MSI
) {
1219 if (dhdpcie_enable_msi(pdev
, 1, 1) < 0) {
1220 DHD_ERROR(("%s: dhdpcie_enable_msi() failed\n", __FUNCTION__
));
1221 dhdpcie_disable_msi(pdev
);
1222 bus
->d2h_intr_method
= PCIE_INTX
;
1226 if (request_irq(pdev
->irq
, dhdpcie_isr
, IRQF_SHARED
,
1227 dhdpcie_info
->pciname
, bus
) < 0) {
1228 DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__
));
1229 if (bus
->d2h_intr_method
== PCIE_MSI
) {
1230 dhdpcie_disable_msi(pdev
);
1235 bus
->irq_registered
= TRUE
;
1238 DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__
));
1241 host_irq_disabled
= dhdpcie_irq_disabled(bus
);
1242 if (host_irq_disabled
) {
1243 DHD_ERROR(("%s: PCIe IRQ was disabled(%d), so, enabled it again\n",
1244 __FUNCTION__
, host_irq_disabled
));
1245 dhdpcie_enable_irq(bus
);
1248 DHD_TRACE(("%s %s\n", __FUNCTION__
, dhdpcie_info
->pciname
));
1250 return 0; /* SUCCESS */
1254 * dhdpcie_get_pcieirq - return pcie irq number to linux-dhd
1257 dhdpcie_get_pcieirq(struct dhd_bus
*bus
, unsigned int *irq
)
1259 struct pci_dev
*pdev
= bus
->dev
;
1262 DHD_ERROR(("%s : bus->dev is NULL\n", __FUNCTION__
));
1268 return 0; /* SUCCESS */
1271 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1272 #define PRINTF_RESOURCE "0x%016llx"
1274 #define PRINTF_RESOURCE "0x%08x"
1277 #ifdef EXYNOS_PCIE_MODULE_PATCH
1278 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1279 extern struct pci_saved_state
*bcm_pcie_default_state
;
1280 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1281 #endif /* EXYNOS_MODULE_PATCH */
/*
 * dhdpcie_get_resource: enable the PCI device, read BAR0/BAR2 addresses and
 * the BAR2 length, ioremap the register window (DONGLE_REG_MAP_SIZE) and the
 * TCM window (at least DONGLE_TCM_MAP_SIZE), and save the PCI config state
 * for later restore.  Returns 0 on success, -1 on failure.
 * NOTE(review): extraction dropped interior lines (braces, `goto err`/return
 * paths between the visible statements) -- verify against the original file.
 */
1285 Name: osl_pci_get_resource
1289 1: struct pci_dev *pdev -- pci device structure
1290 2: pci_res -- structure containing pci configuration space values
1294 int - Status (TRUE or FALSE)
1297 Access PCI configuration space, retrieve PCI allocated resources , updates in resource structure.
1300 int dhdpcie_get_resource(dhdpcie_info_t
*dhdpcie_info
)
1302 phys_addr_t bar0_addr
, bar1_addr
;
1304 struct pci_dev
*pdev
= NULL
;
1305 pdev
= dhdpcie_info
->dev
;
1306 #ifdef EXYNOS_PCIE_MODULE_PATCH
1307 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1308 if (bcm_pcie_default_state
) {
1309 pci_load_saved_state(pdev
, bcm_pcie_default_state
);
1310 pci_restore_state(pdev
);
1312 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1313 #endif /* EXYNOS_MODULE_PATCH */
1315 if (pci_enable_device(pdev
)) {
1316 printf("%s: Cannot enable PCI device\n", __FUNCTION__
);
1319 pci_set_master(pdev
);
1320 bar0_addr
= pci_resource_start(pdev
, 0); /* Bar-0 mapped address */
1321 bar1_addr
= pci_resource_start(pdev
, 2); /* Bar-1 mapped address */
1323 /* read Bar-1 mapped memory range */
1324 bar1_size
= pci_resource_len(pdev
, 2);
1326 if ((bar1_size
== 0) || (bar1_addr
== 0)) {
1327 printf("%s: BAR1 Not enabled for this device size(%ld),"
1328 " addr(0x"PRINTF_RESOURCE
")\n",
1329 __FUNCTION__
, bar1_size
, bar1_addr
);
/* Map BAR0 (dongle registers) and BAR2 ("BAR1" in DHD naming, the TCM). */
1333 dhdpcie_info
->regs
= (volatile char *) REG_MAP(bar0_addr
, DONGLE_REG_MAP_SIZE
);
1334 dhdpcie_info
->tcm_size
=
1335 (bar1_size
> DONGLE_TCM_MAP_SIZE
) ? bar1_size
: DONGLE_TCM_MAP_SIZE
;
1336 dhdpcie_info
->tcm
= (volatile char *) REG_MAP(bar1_addr
, dhdpcie_info
->tcm_size
);
1338 if (!dhdpcie_info
->regs
|| !dhdpcie_info
->tcm
) {
1339 DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__
));
1342 #ifdef EXYNOS_PCIE_MODULE_PATCH
1343 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1344 if (bcm_pcie_default_state
== NULL
) {
1345 pci_save_state(pdev
);
1346 bcm_pcie_default_state
= pci_store_saved_state(pdev
);
1348 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1349 #endif /* EXYNOS_MODULE_PATCH */
1351 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1352 /* Backup PCIe configuration so as to use Wi-Fi on/off process
1353 * in case of built in driver
1355 pci_save_state(pdev
);
1356 dhdpcie_info
->default_state
= pci_store_saved_state(pdev
);
/* On failed state save, unmap both BARs and disable the device again. */
1358 if (dhdpcie_info
->default_state
== NULL
) {
1359 DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
1361 REG_UNMAP(dhdpcie_info
->regs
);
1362 REG_UNMAP(dhdpcie_info
->tcm
);
1363 pci_disable_device(pdev
);
1366 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1368 DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE
" \n",
1369 __FUNCTION__
, dhdpcie_info
->regs
, bar0_addr
));
1370 DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE
" \n",
1371 __FUNCTION__
, dhdpcie_info
->tcm
, bar1_addr
));
1373 return 0; /* SUCCESS */
1376 return -1; /* FAILURE */
/*
 * dhdpcie_scan_resource: thin wrapper that delegates BAR/IRQ discovery to
 * dhdpcie_get_resource().  Returns 0 on success, -1 on failure.
 * NOTE(review): interior lines (braces / goto between the branches) were
 * dropped by extraction.
 */
1379 int dhdpcie_scan_resource(dhdpcie_info_t
*dhdpcie_info
)
1382 DHD_TRACE(("%s: ENTER\n", __FUNCTION__
));
1385 /* define it here only!! */
1386 if (dhdpcie_get_resource (dhdpcie_info
)) {
1387 DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__
));
1390 DHD_TRACE(("%s:Exit - SUCCESS \n",
1393 return 0; /* SUCCESS */
1397 DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__
));
1399 return -1; /* FAILURE */
/*
 * dhdpcie_dump_resource: log the virtual and physical addresses (and map
 * sizes) of the BAR0 register window and the BAR2 TCM window for debugging.
 * Bails out early when bus, bus->dev, or the drvdata (pch) is NULL.
 */
1403 void dhdpcie_dump_resource(dhd_bus_t
*bus
)
1405 dhdpcie_info_t
*pch
;
1408 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__
));
1412 if (bus
->dev
== NULL
) {
1413 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__
));
1417 pch
= pci_get_drvdata(bus
->dev
);
1419 DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__
));
1424 DHD_ERROR(("%s: BAR0(VA): 0x%pK, BAR0(PA): "PRINTF_RESOURCE
", SIZE: %d\n",
1425 __FUNCTION__
, pch
->regs
, pci_resource_start(bus
->dev
, 0),
1426 DONGLE_REG_MAP_SIZE
));
1429 DHD_ERROR(("%s: BAR1(VA): 0x%pK, BAR1(PA): "PRINTF_RESOURCE
", SIZE: %d\n",
1430 __FUNCTION__
, pch
->tcm
, pci_resource_start(bus
->dev
, 2),
/*
 * dhdpcie_linkdown_cb: platform PCIe link-down notification callback
 * (MSM / Exynos link-down recovery).  Marks the bus as link-down, takes a
 * wakelock, and sends a HANG event up with HANG_REASON_PCIE_LINK_DOWN.
 * NOTE(review): `struct_pcie_notify` looks like an extraction artifact of
 * the platform notify-struct type name -- confirm the real parameter type
 * in the original file.
 */
1434 #ifdef SUPPORT_LINKDOWN_RECOVERY
1435 #if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
1436 (defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
1437 defined(CONFIG_SOC_EXYNOS9810)))
1438 void dhdpcie_linkdown_cb(struct_pcie_notify
*noti
)
1440 struct pci_dev
*pdev
= (struct pci_dev
*)noti
->user
;
1441 dhdpcie_info_t
*pch
= NULL
;
1444 pch
= pci_get_drvdata(pdev
);
1446 dhd_bus_t
*bus
= pch
->bus
;
1448 dhd_pub_t
*dhd
= bus
->dhd
;
1450 DHD_ERROR(("%s: Event HANG send up "
1451 "due to PCIe linkdown\n",
1453 #ifdef CONFIG_ARCH_MSM
1454 bus
->no_cfg_restore
= 1;
1455 #endif /* CONFIG_ARCH_MSM */
1456 bus
->is_linkdown
= 1;
1457 DHD_OS_WAKE_LOCK(dhd
);
1458 dhd
->hang_reason
= HANG_REASON_PCIE_LINK_DOWN
;
1459 dhd_os_send_hang_message(dhd
);
1466 #endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY &&
1467 * (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810))
1469 #endif /* SUPPORT_LINKDOWN_RECOVERY */
/*
 * dhdpcie_init: probe-time initialization for one PCIe dongle.
 * Sequence visible here: osl_attach -> platform adapter lookup -> allocate
 * and zero dhdpcie_info (plus optional OOB host-wake and SMMU contexts) ->
 * scan PCI resources -> dhdpcie_bus_attach -> register platform link-down
 * callbacks -> register (masked) IRQ -> optional firmware download ->
 * pci_set_drvdata -> dhd_bus_start / random locally-administered MAC with
 * BRCM OUI 00:90:4C -> dhd_attach_net.  Returns 0 on success, -1 on failure.
 * NOTE(review): extraction dropped the error labels/gotos between the
 * visible statements; the unwind order below is the surviving text only.
 */
1471 int dhdpcie_init(struct pci_dev
*pdev
)
1475 dhd_bus_t
*bus
= NULL
;
1476 dhdpcie_info_t
*dhdpcie_info
= NULL
;
1477 wifi_adapter_info_t
*adapter
= NULL
;
1478 #ifdef BCMPCIE_OOB_HOST_WAKE
1479 dhdpcie_os_info_t
*dhdpcie_osinfo
= NULL
;
1480 #endif /* BCMPCIE_OOB_HOST_WAKE */
1481 #ifdef USE_SMMU_ARCH_MSM
1482 dhdpcie_smmu_info_t
*dhdpcie_smmu_info
= NULL
;
1483 #endif /* USE_SMMU_ARCH_MSM */
1488 if (!(osh
= osl_attach(pdev
, PCI_BUS
, FALSE
))) {
1489 DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__
));
1493 /* initialize static buffer */
1494 adapter
= dhd_wifi_platform_get_adapter(PCI_BUS
, pdev
->bus
->number
,
1495 PCI_SLOT(pdev
->devfn
));
1496 if (adapter
!= NULL
)
1497 DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__
, adapter
->name
));
1499 DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__
));
1500 osl_static_mem_init(osh
, adapter
);
1502 /* Set ACP coherence flag */
1503 if (OSL_ACP_WAR_ENAB() || OSL_ARCH_IS_COHERENT())
1504 osl_flag_set(osh
, OSL_ACP_COHERENCE
);
1506 /* allocate linux spcific pcie structure here */
1507 if (!(dhdpcie_info
= MALLOC(osh
, sizeof(dhdpcie_info_t
)))) {
1508 DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__
));
1511 bzero(dhdpcie_info
, sizeof(dhdpcie_info_t
));
1512 dhdpcie_info
->osh
= osh
;
1513 dhdpcie_info
->dev
= pdev
;
1515 #ifdef BCMPCIE_OOB_HOST_WAKE
1516 /* allocate OS speicific structure */
1517 dhdpcie_osinfo
= MALLOC(osh
, sizeof(dhdpcie_os_info_t
));
1518 if (dhdpcie_osinfo
== NULL
) {
1519 DHD_ERROR(("%s: MALLOC of dhdpcie_os_info_t failed\n",
1523 bzero(dhdpcie_osinfo
, sizeof(dhdpcie_os_info_t
));
1524 dhdpcie_info
->os_cxt
= (void *)dhdpcie_osinfo
;
1526 /* Initialize host wake IRQ */
1527 spin_lock_init(&dhdpcie_osinfo
->oob_irq_spinlock
);
1528 /* Get customer specific host wake IRQ parametres: IRQ number as IRQ type */
1529 dhdpcie_osinfo
->oob_irq_num
= wifi_platform_get_irq_number(adapter
,
1530 &dhdpcie_osinfo
->oob_irq_flags
);
1531 if (dhdpcie_osinfo
->oob_irq_num
< 0) {
1532 DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__
));
1534 #endif /* BCMPCIE_OOB_HOST_WAKE */
1536 #ifdef USE_SMMU_ARCH_MSM
1537 /* allocate private structure for using SMMU */
1538 dhdpcie_smmu_info
= MALLOC(osh
, sizeof(dhdpcie_smmu_info_t
));
1539 if (dhdpcie_smmu_info
== NULL
) {
1540 DHD_ERROR(("%s: MALLOC of dhdpcie_smmu_info_t failed\n",
1544 bzero(dhdpcie_smmu_info
, sizeof(dhdpcie_smmu_info_t
));
1545 dhdpcie_info
->smmu_cxt
= (void *)dhdpcie_smmu_info
;
1547 /* Initialize smmu structure */
1548 if (dhdpcie_smmu_init(pdev
, dhdpcie_info
->smmu_cxt
) < 0) {
1549 DHD_ERROR(("%s: Failed to initialize SMMU\n",
1553 #endif /* USE_SMMU_ARCH_MSM */
1555 #ifdef DHD_WAKE_STATUS
1556 /* Initialize pcie_lock */
1557 spin_lock_init(&dhdpcie_info
->pcie_lock
);
1558 #endif /* DHD_WAKE_STATUS */
1560 /* Find the PCI resources, verify the */
1561 /* vendor and device ID, map BAR regions and irq, update in structures */
1562 if (dhdpcie_scan_resource(dhdpcie_info
)) {
1563 DHD_ERROR(("%s: dhd_Scan_PCI_Res failed\n", __FUNCTION__
));
1568 /* Bus initialization */
1569 ret
= dhdpcie_bus_attach(osh
, &bus
, dhdpcie_info
->regs
, dhdpcie_info
->tcm
, pdev
);
1570 if (ret
!= BCME_OK
) {
1571 DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__
));
1575 dhdpcie_info
->bus
= bus
;
1576 bus
->is_linkdown
= 0;
1577 bus
->no_bus_init
= FALSE
;
1579 /* Get RC Device Handle */
1580 bus
->rc_dev
= pci_get_device(PCIE_RC_VENDOR_ID
, PCIE_RC_DEVICE_ID
, NULL
);
1582 #ifdef DONGLE_ENABLE_ISOLATION
1583 bus
->dhd
->dongle_isolation
= TRUE
;
1584 #endif /* DONGLE_ENABLE_ISOLATION */
1585 #ifdef SUPPORT_LINKDOWN_RECOVERY
1586 #ifdef CONFIG_ARCH_MSM
1587 bus
->pcie_event
.events
= MSM_PCIE_EVENT_LINKDOWN
;
1588 bus
->pcie_event
.user
= pdev
;
1589 bus
->pcie_event
.mode
= MSM_PCIE_TRIGGER_CALLBACK
;
1590 bus
->pcie_event
.callback
= dhdpcie_linkdown_cb
;
1591 bus
->pcie_event
.options
= MSM_PCIE_CONFIG_NO_RECOVERY
;
1592 msm_pcie_register_event(&bus
->pcie_event
);
1593 bus
->no_cfg_restore
= FALSE
;
1594 #endif /* CONFIG_ARCH_MSM */
1595 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1596 #if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
1597 defined(CONFIG_SOC_EXYNOS9810)
1598 bus
->pcie_event
.events
= EXYNOS_PCIE_EVENT_LINKDOWN
;
1599 bus
->pcie_event
.user
= pdev
;
1600 bus
->pcie_event
.mode
= EXYNOS_PCIE_TRIGGER_CALLBACK
;
1601 bus
->pcie_event
.callback
= dhdpcie_linkdown_cb
;
1602 exynos_pcie_register_event(&bus
->pcie_event
);
1603 #endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 ||
1604 * CONFIG_SOC_EXYNOS9810
1606 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1607 bus
->read_shm_fail
= FALSE
;
1608 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1611 /* Register interrupt callback, but mask it (not operational yet). */
1612 DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__
));
1613 dhdpcie_bus_intr_disable(bus
);
1615 if (dhdpcie_request_irq(dhdpcie_info
)) {
1616 DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__
));
1621 DHD_INFO(("%s: PCIe interrupt function is NOT registered "
1622 "due to polling mode\n", __FUNCTION__
));
1625 #if defined(BCM_REQUEST_FW)
1626 if (dhd_bus_download_firmware(bus
, osh
, NULL
, NULL
) < 0) {
1627 DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__
));
1629 bus
->nv_path
= NULL
;
1630 bus
->fw_path
= NULL
;
1631 #endif /* BCM_REQUEST_FW */
1633 /* set private data for pci_dev */
1634 pci_set_drvdata(pdev
, dhdpcie_info
);
/* NOTE(review): "dhd_bud_start" below is a typo for dhd_bus_start in the
 * log string; left untouched because this is a documentation-only pass.
 */
1636 if (dhd_download_fw_on_driverload
) {
1637 if (dhd_bus_start(bus
->dhd
)) {
1638 DHD_ERROR(("%s: dhd_bud_start() failed\n", __FUNCTION__
));
1639 if (!allow_delay_fwdl
)
1643 /* Set ramdom MAC address during boot time */
1644 get_random_bytes(&bus
->dhd
->mac
.octet
[3], 3);
1645 /* Adding BRCM OUI */
1646 bus
->dhd
->mac
.octet
[0] = 0;
1647 bus
->dhd
->mac
.octet
[1] = 0x90;
1648 bus
->dhd
->mac
.octet
[2] = 0x4C;
1651 /* Attach to the OS network interface */
1652 DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__
));
1653 if (dhd_attach_net(bus
->dhd
, TRUE
)) {
1654 DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__
));
1658 dhdpcie_init_succeeded
= TRUE
;
1660 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1661 pm_runtime_set_autosuspend_delay(&pdev
->dev
, AUTO_SUSPEND_TIMEOUT
);
1662 pm_runtime_use_autosuspend(&pdev
->dev
);
1663 atomic_set(&bus
->dhd
->block_bus
, FALSE
);
1664 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1666 DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__
));
1667 return 0; /* return SUCCESS */
1670 /* reverse the initialization in order in case of error */
1673 dhdpcie_bus_release(bus
);
1675 #ifdef BCMPCIE_OOB_HOST_WAKE
1676 if (dhdpcie_osinfo
) {
1677 MFREE(osh
, dhdpcie_osinfo
, sizeof(dhdpcie_os_info_t
));
1679 #endif /* BCMPCIE_OOB_HOST_WAKE */
1681 #ifdef USE_SMMU_ARCH_MSM
1682 if (dhdpcie_smmu_info
) {
1683 MFREE(osh
, dhdpcie_smmu_info
, sizeof(dhdpcie_smmu_info_t
));
1684 dhdpcie_info
->smmu_cxt
= NULL
;
1686 #endif /* USE_SMMU_ARCH_MSM */
1689 dhdpcie_detach(dhdpcie_info
);
1690 pci_disable_device(pdev
);
1694 dhdpcie_init_succeeded
= FALSE
;
1696 DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__
));
1698 return -1; /* return FAILURE */
/*
 * dhdpcie_free_irq: release the Linux IRQ registered for this bus and, if
 * the D2H interrupt method was MSI, disable MSI as well.
 * NOTE(review): as extracted, `pdev` is initialized to NULL and then used
 * in free_irq(); the `pdev = bus->dev;` assignment was evidently dropped
 * by extraction -- confirm against the original file.
 */
1701 /* Free Linux irq */
1703 dhdpcie_free_irq(dhd_bus_t
*bus
)
1705 struct pci_dev
*pdev
= NULL
;
1707 DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__
));
1710 if (bus
->irq_registered
) {
1711 free_irq(pdev
->irq
, bus
);
1712 bus
->irq_registered
= FALSE
;
1713 if (bus
->d2h_intr_method
== PCIE_MSI
) {
1714 dhdpcie_disable_msi(pdev
);
1717 DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__
));
1720 DHD_TRACE(("%s: Exit\n", __FUNCTION__
));
/*
 * dhdpcie_isr: top-half interrupt handler.  Timestamps entry/exit via
 * OSL_LOCALTIME_NS() and delegates real work to dhdpcie_bus_isr().
 * NOTE(review): the return statement (IRQ_HANDLED/IRQ_NONE) is not visible
 * in this extraction.
 */
1730 1: IN int irq -- interrupt vector
1731 2: IN void *arg -- handle to private data structure
1735 Status (TRUE or FALSE)
1738 Interrupt Service routine checks for the status register,
1739 disable interrupt and queue DPC if mail box interrupts are raised.
1743 dhdpcie_isr(int irq
, void *arg
)
1745 dhd_bus_t
*bus
= (dhd_bus_t
*)arg
;
1746 bus
->isr_entry_time
= OSL_LOCALTIME_NS();
1747 if (!dhdpcie_bus_isr(bus
)) {
1748 DHD_LOG_MEM(("%s: dhdpcie_bus_isr returns with FALSE\n", __FUNCTION__
));
1750 bus
->isr_exit_time
= OSL_LOCALTIME_NS();
/*
 * dhdpcie_disable_irq_nosync: disable the bus PCI IRQ without waiting for
 * in-flight handlers (disable_irq_nosync); NULL-checks bus and bus->dev.
 * NOTE(review): the `dev = bus->dev;` assignment is not visible here.
 */
1755 dhdpcie_disable_irq_nosync(dhd_bus_t
*bus
)
1757 struct pci_dev
*dev
;
1758 if ((bus
== NULL
) || (bus
->dev
== NULL
)) {
1759 DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__
));
1764 disable_irq_nosync(dev
->irq
);
/*
 * dhdpcie_disable_irq: synchronously disable the bus PCI IRQ (disable_irq
 * waits for running handlers); NULL-checks bus and bus->dev.
 * NOTE(review): the `dev = bus->dev;` assignment is not visible here.
 */
1769 dhdpcie_disable_irq(dhd_bus_t
*bus
)
1771 struct pci_dev
*dev
;
1772 if ((bus
== NULL
) || (bus
->dev
== NULL
)) {
1773 DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__
));
1778 disable_irq(dev
->irq
);
/*
 * dhdpcie_enable_irq: re-enable the bus PCI IRQ; NULL-checks bus and
 * bus->dev.  Mirrors dhdpcie_disable_irq{,_nosync} above.
 * NOTE(review): the `dev = bus->dev;` assignment is not visible here.
 */
1783 dhdpcie_enable_irq(dhd_bus_t
*bus
)
1785 struct pci_dev
*dev
;
1786 if ((bus
== NULL
) || (bus
->dev
== NULL
)) {
1787 DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__
));
1792 enable_irq(dev
->irq
);
/*
 * dhdpcie_irq_disabled: query whether the host IRQ line is currently
 * disabled by inspecting irq_desc (kernels >= 2.6.28; older kernels get an
 * error return).  The descriptor's disable depth is 0 when enabled.
 * NOTE(review): the actual return expressions were dropped by extraction.
 */
1797 dhdpcie_irq_disabled(dhd_bus_t
*bus
)
1799 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
1800 struct irq_desc
*desc
= irq_to_desc(bus
->dev
->irq
);
1801 /* depth will be zero, if enabled */
1804 /* return ERROR by default as there is no support for lower versions */
1806 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
/*
 * dhdpcie_start_host_pcieclock: bring the host PCIe link back up.  On MSM
 * platforms this calls msm_pcie_pm_control(MSM_PCIE_RESUME, ...); when the
 * link previously went down (no_cfg_restore) the resume skips config
 * restore and msm_pcie_recover_config() is invoked afterwards instead.
 */
1810 dhdpcie_start_host_pcieclock(dhd_bus_t
*bus
)
1813 #ifdef CONFIG_ARCH_MSM
1814 #ifdef SUPPORT_LINKDOWN_RECOVERY
1816 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1817 #endif /* CONFIG_ARCH_MSM */
1818 DHD_TRACE(("%s Enter:\n", __FUNCTION__
));
1824 if (bus
->dev
== NULL
) {
1828 #ifdef CONFIG_ARCH_MSM
1829 #ifdef SUPPORT_LINKDOWN_RECOVERY
1830 if (bus
->no_cfg_restore
) {
1831 options
= MSM_PCIE_CONFIG_NO_CFG_RESTORE
;
1833 ret
= msm_pcie_pm_control(MSM_PCIE_RESUME
, bus
->dev
->bus
->number
,
1834 bus
->dev
, NULL
, options
);
1835 if (bus
->no_cfg_restore
&& !ret
) {
1836 msm_pcie_recover_config(bus
->dev
);
1837 bus
->no_cfg_restore
= 0;
1840 ret
= msm_pcie_pm_control(MSM_PCIE_RESUME
, bus
->dev
->bus
->number
,
1842 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1844 DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__
));
1849 #endif /* CONFIG_ARCH_MSM */
1850 DHD_TRACE(("%s Exit:\n", __FUNCTION__
));
/*
 * dhdpcie_stop_host_pcieclock: take the host PCIe link down.  On MSM this
 * calls msm_pcie_pm_control(MSM_PCIE_SUSPEND, ...); after a link-down
 * (no_cfg_restore) the suspend additionally passes
 * MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN.
 */
1855 dhdpcie_stop_host_pcieclock(dhd_bus_t
*bus
)
1858 #ifdef CONFIG_ARCH_MSM
1859 #ifdef SUPPORT_LINKDOWN_RECOVERY
1861 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1862 #endif /* CONFIG_ARCH_MSM */
1864 DHD_TRACE(("%s Enter:\n", __FUNCTION__
));
1870 if (bus
->dev
== NULL
) {
1874 #ifdef CONFIG_ARCH_MSM
1875 #ifdef SUPPORT_LINKDOWN_RECOVERY
1876 if (bus
->no_cfg_restore
) {
1877 options
= MSM_PCIE_CONFIG_NO_CFG_RESTORE
| MSM_PCIE_CONFIG_LINKDOWN
;
1880 ret
= msm_pcie_pm_control(MSM_PCIE_SUSPEND
, bus
->dev
->bus
->number
,
1881 bus
->dev
, NULL
, options
);
1883 ret
= msm_pcie_pm_control(MSM_PCIE_SUSPEND
, bus
->dev
->bus
->number
,
1885 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1887 DHD_ERROR(("Failed to stop PCIe link\n"));
1891 #endif /* CONFIG_ARCH_MSM */
1892 DHD_TRACE(("%s Exit:\n", __FUNCTION__
));
/*
 * dhdpcie_disable_device: call pci_disable_device() on bus->dev; on
 * kernels >= 2.6.31 only when pci_is_enabled() reports it enabled.
 */
1897 dhdpcie_disable_device(dhd_bus_t
*bus
)
1899 DHD_TRACE(("%s Enter:\n", __FUNCTION__
));
1905 if (bus
->dev
== NULL
) {
1909 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
1910 if (pci_is_enabled(bus
->dev
))
1912 pci_disable_device(bus
->dev
);
/*
 * dhdpcie_enable_device: re-enable the PCI device after a suspend/off
 * cycle.  Reloads the saved config-space state (version-dependent:
 * pci_load_and_free_saved_state + re-store on 3.14..3.18, plain
 * pci_load_saved_state otherwise), validates the saved vendor ID against a
 * fresh config read (an all-ones read marks the link as down), restores
 * state, then pci_enable_device() + pci_set_master().  Returns a BCME_*
 * style code (starts as BCME_ERROR).
 */
1918 dhdpcie_enable_device(dhd_bus_t
*bus
)
1920 int ret
= BCME_ERROR
;
1921 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1922 dhdpcie_info_t
*pch
;
1923 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1925 DHD_TRACE(("%s Enter:\n", __FUNCTION__
));
1931 if (bus
->dev
== NULL
) {
1935 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1936 pch
= pci_get_drvdata(bus
->dev
);
1941 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && (LINUX_VERSION_CODE < \
1942 KERNEL_VERSION(3, 19, 0)) && !defined(CONFIG_SOC_EXYNOS8890)
1943 /* Updated with pci_load_and_free_saved_state to compatible
1944 * with Kernel version 3.14.0 to 3.18.41.
1946 pci_load_and_free_saved_state(bus
->dev
, &pch
->default_state
);
1947 pch
->default_state
= pci_store_saved_state(bus
->dev
);
1949 pci_load_saved_state(bus
->dev
, pch
->default_state
);
1950 #endif /* LINUX_VERSION >= 3.14.0 && LINUX_VERSION < 3.19.0 && !CONFIG_SOC_EXYNOS8890 */
1952 /* Check if Device ID is valid */
1953 if (bus
->dev
->state_saved
) {
1954 uint32 vid
, saved_vid
;
1955 pci_read_config_dword(bus
->dev
, PCI_CFG_VID
, &vid
);
1956 saved_vid
= bus
->dev
->saved_config_space
[PCI_CFG_VID
];
1957 if (vid
!= saved_vid
) {
1958 DHD_ERROR(("%s: VID(0x%x) is different from saved VID(0x%x) "
1959 "Skip the bus init\n", __FUNCTION__
, vid
, saved_vid
));
1960 bus
->no_bus_init
= TRUE
;
1961 /* Check if the PCIe link is down */
1962 if (vid
== (uint32
)-1) {
1963 bus
->is_linkdown
= 1;
1964 #ifdef SUPPORT_LINKDOWN_RECOVERY
1965 #ifdef CONFIG_ARCH_MSM
1966 bus
->no_cfg_restore
= TRUE
;
1967 #endif /* CONFIG_ARCH_MSM */
1968 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1974 pci_restore_state(bus
->dev
);
1975 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */
1977 ret
= pci_enable_device(bus
->dev
);
1979 pci_disable_device(bus
->dev
);
1981 pci_set_master(bus
->dev
);
/*
 * dhdpcie_alloc_resource: re-map the BAR0 register window and the BAR2 TCM
 * window (at least DONGLE_TCM_MAP_SIZE) into the drvdata structure and
 * mirror the pointers into bus->regs / bus->tcm.  Companion to
 * dhdpcie_free_resource() below.
 */
1988 dhdpcie_alloc_resource(dhd_bus_t
*bus
)
1990 dhdpcie_info_t
*dhdpcie_info
;
1991 phys_addr_t bar0_addr
, bar1_addr
;
1996 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__
));
2000 if (bus
->dev
== NULL
) {
2001 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__
));
2005 dhdpcie_info
= pci_get_drvdata(bus
->dev
);
2006 if (dhdpcie_info
== NULL
) {
2007 DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__
));
2011 bar0_addr
= pci_resource_start(bus
->dev
, 0); /* Bar-0 mapped address */
2012 bar1_addr
= pci_resource_start(bus
->dev
, 2); /* Bar-1 mapped address */
2014 /* read Bar-1 mapped memory range */
2015 bar1_size
= pci_resource_len(bus
->dev
, 2);
2017 if ((bar1_size
== 0) || (bar1_addr
== 0)) {
2018 printf("%s: BAR1 Not enabled for this device size(%ld),"
2019 " addr(0x"PRINTF_RESOURCE
")\n",
2020 __FUNCTION__
, bar1_size
, bar1_addr
);
2024 dhdpcie_info
->regs
= (volatile char *) REG_MAP(bar0_addr
, DONGLE_REG_MAP_SIZE
);
2025 if (!dhdpcie_info
->regs
) {
2026 DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__
));
2030 bus
->regs
= dhdpcie_info
->regs
;
2031 dhdpcie_info
->tcm_size
=
2032 (bar1_size
> DONGLE_TCM_MAP_SIZE
) ? bar1_size
: DONGLE_TCM_MAP_SIZE
;
2033 dhdpcie_info
->tcm
= (volatile char *) REG_MAP(bar1_addr
, dhdpcie_info
->tcm_size
);
/* NOTE(review): the message below says "for regs" but this is the tcm
 * mapping failure path -- apparent copy/paste in the log string.
 */
2034 if (!dhdpcie_info
->tcm
) {
2035 DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__
));
2036 REG_UNMAP(dhdpcie_info
->regs
);
2041 bus
->tcm
= dhdpcie_info
->tcm
;
2043 DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE
" \n",
2044 __FUNCTION__
, dhdpcie_info
->regs
, bar0_addr
));
2045 DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE
" \n",
2046 __FUNCTION__
, dhdpcie_info
->tcm
, bar1_addr
));
/*
 * dhdpcie_free_resource: unmap the BAR0 register window and the TCM window
 * allocated by dhdpcie_alloc_resource().  NULL-checks bus, bus->dev and
 * the drvdata before unmapping.
 */
2055 dhdpcie_free_resource(dhd_bus_t
*bus
)
2057 dhdpcie_info_t
*dhdpcie_info
;
2060 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__
));
2064 if (bus
->dev
== NULL
) {
2065 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__
));
2069 dhdpcie_info
= pci_get_drvdata(bus
->dev
);
2070 if (dhdpcie_info
== NULL
) {
2071 DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__
));
2076 REG_UNMAP(dhdpcie_info
->regs
);
2081 REG_UNMAP(dhdpcie_info
->tcm
);
/*
 * dhdpcie_bus_request_irq: mask bus interrupts and (re)register the Linux
 * IRQ via dhdpcie_request_irq(), logging the error code on failure.
 * NULL-checks bus, bus->dev and the drvdata first.
 */
2087 dhdpcie_bus_request_irq(struct dhd_bus
*bus
)
2089 dhdpcie_info_t
*dhdpcie_info
;
2093 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__
));
2097 if (bus
->dev
== NULL
) {
2098 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__
));
2102 dhdpcie_info
= pci_get_drvdata(bus
->dev
);
2103 if (dhdpcie_info
== NULL
) {
2104 DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__
));
2109 /* Register interrupt callback, but mask it (not operational yet). */
2110 DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__
));
2111 dhdpcie_bus_intr_disable(bus
);
2112 ret
= dhdpcie_request_irq(dhdpcie_info
);
2114 DHD_ERROR(("%s: request_irq() failed, ret=%d\n",
2115 __FUNCTION__
, ret
));
/*
 * dhdpcie_get_oob_irq_num: return the out-of-band host-wake IRQ number
 * stored in the per-device os_cxt, or 0 when the OS info is absent.
 */
2123 #ifdef BCMPCIE_OOB_HOST_WAKE
2124 int dhdpcie_get_oob_irq_num(dhd_bus_t
*bus
)
2126 dhdpcie_info_t
*pch
;
2127 dhdpcie_os_info_t
*dhdpcie_osinfo
;
2130 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__
));
2134 if (bus
->dev
== NULL
) {
2135 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__
));
2139 pch
= pci_get_drvdata(bus
->dev
);
2141 DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__
));
2145 dhdpcie_osinfo
= (dhdpcie_os_info_t
*)pch
->os_cxt
;
2147 return dhdpcie_osinfo
? dhdpcie_osinfo
->oob_irq_num
: 0;
/*
 * dhdpcie_oob_intr_set: enable or disable the out-of-band host-wake IRQ
 * under oob_irq_spinlock.  Only acts when the requested state differs from
 * the cached oob_irq_enabled and a valid IRQ number (> 0) exists; bumps
 * the matching enable/disable counters on the bus.
 */
2150 void dhdpcie_oob_intr_set(dhd_bus_t
*bus
, bool enable
)
2152 unsigned long flags
;
2153 dhdpcie_info_t
*pch
;
2154 dhdpcie_os_info_t
*dhdpcie_osinfo
;
2157 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__
));
2161 if (bus
->dev
== NULL
) {
2162 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__
));
2166 pch
= pci_get_drvdata(bus
->dev
);
2168 DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__
));
2172 dhdpcie_osinfo
= (dhdpcie_os_info_t
*)pch
->os_cxt
;
2173 spin_lock_irqsave(&dhdpcie_osinfo
->oob_irq_spinlock
, flags
);
2174 if ((dhdpcie_osinfo
->oob_irq_enabled
!= enable
) &&
2175 (dhdpcie_osinfo
->oob_irq_num
> 0)) {
2177 enable_irq(dhdpcie_osinfo
->oob_irq_num
);
2178 bus
->oob_intr_enable_count
++;
2180 disable_irq_nosync(dhdpcie_osinfo
->oob_irq_num
);
2181 bus
->oob_intr_disable_count
++;
2183 dhdpcie_osinfo
->oob_irq_enabled
= enable
;
2185 spin_unlock_irqrestore(&dhdpcie_osinfo
->oob_irq_spinlock
, flags
);
/*
 * wlan_oob_irq: handler for the out-of-band host-wake GPIO interrupt.
 * Masks itself via dhdpcie_oob_intr_set(bus, FALSE), timestamps and counts
 * the wake, optionally records wake statistics, kicks the runtime-PM wake
 * paths, and holds a timed wakelock while the bus is in a low-power state.
 * NOTE(review): the return statement (IRQ_HANDLED) is not visible in this
 * extraction.
 */
2188 static irqreturn_t
wlan_oob_irq(int irq
, void *data
)
2191 unsigned long flags_bus
;
2192 DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__
));
2193 bus
= (dhd_bus_t
*)data
;
2194 dhdpcie_oob_intr_set(bus
, FALSE
);
2195 bus
->last_oob_irq_time
= OSL_LOCALTIME_NS();
2196 bus
->oob_intr_count
++;
2197 #ifdef DHD_WAKE_STATUS
2198 #ifdef DHD_PCIE_RUNTIMEPM
2199 /* This condition is for avoiding counting of wake up from Runtime PM */
2201 #endif /* DHD_PCIE_RUNTIMPM */
2203 bcmpcie_set_get_wake(bus
, 1);
2205 #endif /* DHD_WAKE_STATUS */
2206 #ifdef DHD_PCIE_RUNTIMEPM
2207 dhdpcie_runtime_bus_wake(bus
->dhd
, FALSE
, wlan_oob_irq
);
2208 #endif /* DHD_PCIE_RUNTIMPM */
2209 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2210 dhd_bus_wakeup_work(bus
->dhd
);
2211 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2212 DHD_BUS_LOCK(bus
->bus_lock
, flags_bus
);
2213 /* Hold wakelock if bus_low_power_state is
2214 * DHD_BUS_D3_INFORM_SENT OR DHD_BUS_D3_ACK_RECIEVED
2216 if (bus
->dhd
->up
&& bus
->bus_low_power_state
!= DHD_BUS_NO_LOW_POWER_STATE
) {
2217 DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus
->dhd
, OOB_WAKE_LOCK_TIMEOUT
);
2219 DHD_BUS_UNLOCK(bus
->bus_lock
, flags_bus
);
/*
 * dhdpcie_oob_intr_register: request the out-of-band host-wake IRQ
 * ("dhdpcie_host_wake", handler wlan_oob_irq) with the platform-provided
 * flags, arm it as a wakeup source via enable_irq_wake(), and record the
 * registered/enabled/wake-enabled state in the per-device os_cxt.
 * enable_irq_wake() failure is tolerated (e.g. Hikey, see comment below).
 */
2223 int dhdpcie_oob_intr_register(dhd_bus_t
*bus
)
2226 dhdpcie_info_t
*pch
;
2227 dhdpcie_os_info_t
*dhdpcie_osinfo
;
2229 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
2231 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__
));
2235 if (bus
->dev
== NULL
) {
2236 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__
));
2240 pch
= pci_get_drvdata(bus
->dev
);
2242 DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__
));
2246 dhdpcie_osinfo
= (dhdpcie_os_info_t
*)pch
->os_cxt
;
2247 if (dhdpcie_osinfo
->oob_irq_registered
) {
2248 DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__
));
2252 if (dhdpcie_osinfo
->oob_irq_num
> 0) {
2253 DHD_INFO_HW4(("%s OOB irq=%d flags=%X \n", __FUNCTION__
,
2254 (int)dhdpcie_osinfo
->oob_irq_num
,
2255 (int)dhdpcie_osinfo
->oob_irq_flags
));
2256 err
= request_irq(dhdpcie_osinfo
->oob_irq_num
, wlan_oob_irq
,
2257 dhdpcie_osinfo
->oob_irq_flags
, "dhdpcie_host_wake",
2260 DHD_ERROR(("%s: request_irq failed with %d\n",
2261 __FUNCTION__
, err
));
2264 err
= enable_irq_wake(dhdpcie_osinfo
->oob_irq_num
);
2266 dhdpcie_osinfo
->oob_irq_wake_enabled
= TRUE
;
2268 /* On Hikey platform enable_irq_wake() is failing with error
2269 * ENXIO (No such device or address). This is because the callback function
2270 * irq_set_wake() is not registered in kernel, hence returning BCME_OK.
2273 dhdpcie_osinfo
->oob_irq_enabled
= TRUE
;
2276 dhdpcie_osinfo
->oob_irq_registered
= TRUE
;
/*
 * dhdpcie_oob_intr_unregister: tear down the out-of-band host-wake IRQ in
 * reverse order of registration: disable_irq_wake() if wake was armed,
 * disable_irq() if currently enabled, then free_irq(); finally clear the
 * registered flag.  Silently returns early on NULL bus/dev/pch or when
 * nothing was registered.
 */
2281 void dhdpcie_oob_intr_unregister(dhd_bus_t
*bus
)
2284 dhdpcie_info_t
*pch
;
2285 dhdpcie_os_info_t
*dhdpcie_osinfo
;
2287 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
2289 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__
));
2293 if (bus
->dev
== NULL
) {
2294 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__
));
2298 pch
= pci_get_drvdata(bus
->dev
);
2300 DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__
));
2304 dhdpcie_osinfo
= (dhdpcie_os_info_t
*)pch
->os_cxt
;
2305 if (!dhdpcie_osinfo
->oob_irq_registered
) {
2306 DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__
));
2309 if (dhdpcie_osinfo
->oob_irq_num
> 0) {
2310 if (dhdpcie_osinfo
->oob_irq_wake_enabled
) {
2311 err
= disable_irq_wake(dhdpcie_osinfo
->oob_irq_num
);
2313 dhdpcie_osinfo
->oob_irq_wake_enabled
= FALSE
;
2316 if (dhdpcie_osinfo
->oob_irq_enabled
) {
2317 disable_irq(dhdpcie_osinfo
->oob_irq_num
);
2318 dhdpcie_osinfo
->oob_irq_enabled
= FALSE
;
2320 free_irq(dhdpcie_osinfo
->oob_irq_num
, bus
);
2322 dhdpcie_osinfo
->oob_irq_registered
= FALSE
;
2324 #endif /* BCMPCIE_OOB_HOST_WAKE */
/*
 * dhd_dongle_mem_dump: exported hook that forces a dongle memory dump on
 * the global g_dhd_bus -- dumps the console buffer and protocol debug
 * info, sets memdump_enabled = DUMP_MEMFILE_BUGON and memdump_type =
 * DUMP_TYPE_AP_ABNORMAL_ACCESS, wakes the bus (runtime-PM builds), then
 * triggers dhd_bus_mem_dump().
 */
2326 #ifdef DHD_FW_COREDUMP
2328 dhd_dongle_mem_dump(void)
2331 DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__
));
2335 dhd_bus_dump_console_buffer(g_dhd_bus
);
2336 dhd_prot_debug_info_print(g_dhd_bus
->dhd
);
2338 g_dhd_bus
->dhd
->memdump_enabled
= DUMP_MEMFILE_BUGON
;
2339 g_dhd_bus
->dhd
->memdump_type
= DUMP_TYPE_AP_ABNORMAL_ACCESS
;
2341 #ifdef DHD_PCIE_RUNTIMEPM
2342 dhdpcie_runtime_bus_wake(g_dhd_bus
->dhd
, TRUE
, __builtin_return_address(0));
2343 #endif /* DHD_PCIE_RUNTIMEPM */
2345 dhd_bus_mem_dump(g_dhd_bus
->dhd
);
2348 EXPORT_SYMBOL(dhd_dongle_mem_dump
);
2349 #endif /* DHD_FW_COREDUMP */
/*
 * dhd_bus_check_driver_up: exported query that reports whether the DHD
 * driver is up, based on the global g_dhd_bus.
 * NOTE(review): the actual up-state computation and return were dropped by
 * extraction; only the NULL-bus log remains visible.
 */
2352 dhd_bus_check_driver_up(void)
2358 bus
= (dhd_bus_t
*)g_dhd_bus
;
2360 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__
));
2371 EXPORT_SYMBOL(dhd_bus_check_driver_up
);
/*
 * dhd_runtimepm_state: runtime-PM watchdog state machine.  When the bus
 * idle count reaches idletime and the bus is fully idle (and not going
 * down), it stops the TX queues, performs an RPM suspend via
 * dhdpcie_set_suspend_resume(bus, TRUE) (re-trying later on failure),
 * sleeps on rpm_queue until bus_wake, then resumes and restarts the
 * queues, waking any waiters on rpm_queue.  All busy-state transitions
 * happen under DHD_GENERAL_LOCK.
 */
2373 #ifdef DHD_PCIE_RUNTIMEPM
2374 bool dhd_runtimepm_state(dhd_pub_t
*dhd
)
2377 unsigned long flags
;
2380 DHD_GENERAL_LOCK(dhd
, flags
);
2383 DHD_TRACE(("%s : Enter \n", __FUNCTION__
));
2384 if ((bus
->idletime
> 0) && (bus
->idlecount
>= bus
->idletime
)) {
2386 if (DHD_BUS_BUSY_CHECK_IDLE(dhd
) && !DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd
)) {
2388 DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhd
);
2389 bus
->runtime_resume_done
= FALSE
;
2390 /* stop all interface network queue. */
2391 dhd_bus_stop_queue(bus
);
2392 DHD_GENERAL_UNLOCK(dhd
, flags
);
2393 DHD_ERROR(("%s: DHD Idle state!! - idletime :%d, wdtick :%d \n",
2394 __FUNCTION__
, bus
->idletime
, dhd_runtimepm_ms
));
2395 /* RPM suspend is failed, return FALSE then re-trying */
2396 if (dhdpcie_set_suspend_resume(bus
, TRUE
)) {
2397 DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__
));
2398 DHD_GENERAL_LOCK(dhd
, flags
);
2399 DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd
);
2400 dhd_os_busbusy_wake(bus
->dhd
);
2401 bus
->runtime_resume_done
= TRUE
;
2402 /* It can make stuck NET TX Queue without below */
2403 dhd_bus_start_queue(bus
);
2404 DHD_GENERAL_UNLOCK(dhd
, flags
);
2406 wake_up_interruptible(&bus
->rpm_queue
);
2410 DHD_GENERAL_LOCK(dhd
, flags
);
2411 DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd
);
2412 DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhd
);
2413 /* For making sure NET TX Queue active */
2414 dhd_bus_start_queue(bus
);
2415 DHD_GENERAL_UNLOCK(dhd
, flags
);
2417 wait_event_interruptible(bus
->rpm_queue
, bus
->bus_wake
);
2419 DHD_GENERAL_LOCK(dhd
, flags
);
2420 DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhd
);
2421 DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhd
);
2422 DHD_GENERAL_UNLOCK(dhd
, flags
);
2424 dhdpcie_set_suspend_resume(bus
, FALSE
);
2426 DHD_GENERAL_LOCK(dhd
, flags
);
2427 DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhd
);
2428 dhd_os_busbusy_wake(bus
->dhd
);
2429 /* Inform the wake up context that Resume is over */
2430 bus
->runtime_resume_done
= TRUE
;
2431 /* For making sure NET TX Queue active */
2432 dhd_bus_start_queue(bus
);
2433 DHD_GENERAL_UNLOCK(dhd
, flags
);
2436 wake_up_interruptible(&bus
->rpm_queue
);
2437 DHD_ERROR(("%s : runtime resume ended \n", __FUNCTION__
));
2440 DHD_GENERAL_UNLOCK(dhd
, flags
);
2441 /* Since one of the contexts are busy (TX, IOVAR or RX)
2442 * we should not suspend
2444 DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n",
2445 __FUNCTION__
, dhd
->dhd_bus_busy_state
));
2450 DHD_GENERAL_UNLOCK(dhd
, flags
);
2452 } /* dhd_runtimepm_state */
/*
 * dhd_runtime_bus_wake: wake the bus out of runtime-PM suspend.  If an RPM
 * transition is in flight, either kick the RPM state thread (suspend in
 * progress / suspended) or just wait (resume already in progress); when
 * `wait` is TRUE the caller blocks on rpm_queue until runtime_resume_done.
 * Returns early (no wake needed) when dhd is not up.  func_addr is the
 * caller address used only for the "%pf" log line.
 */
2455 * dhd_runtime_bus_wake
2456 * TRUE - related with runtime pm context
2457 * FALSE - It isn't invloved in runtime pm context
2459 bool dhd_runtime_bus_wake(dhd_bus_t
*bus
, bool wait
, void *func_addr
)
2461 unsigned long flags
;
2463 DHD_TRACE(("%s : enter\n", __FUNCTION__
));
2464 if (bus
->dhd
->up
== FALSE
) {
2465 DHD_INFO(("%s : dhd is not up\n", __FUNCTION__
));
2469 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
2470 if (DHD_BUS_BUSY_CHECK_RPM_ALL(bus
->dhd
)) {
2471 /* Wake up RPM state thread if it is suspend in progress or suspended */
2472 if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus
->dhd
) ||
2473 DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus
->dhd
)) {
2476 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
2478 DHD_ERROR(("Runtime Resume is called in %pf\n", func_addr
));
2480 wake_up_interruptible(&bus
->rpm_queue
);
2481 /* No need to wake up the RPM state thread */
2482 } else if (DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus
->dhd
)) {
2483 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
2486 /* If wait is TRUE, function with wait = TRUE will be wait in here */
2488 wait_event_interruptible(bus
->rpm_queue
, bus
->runtime_resume_done
);
2490 DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__
));
2492 /* If it is called from RPM context, it returns TRUE */
2496 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
2501 bool dhdpcie_runtime_bus_wake(dhd_pub_t
*dhdp
, bool wait
, void* func_addr
)
2503 dhd_bus_t
*bus
= dhdp
->bus
;
2504 return dhd_runtime_bus_wake(bus
, wait
, func_addr
);
/*
 * dhdpcie_block_runtime_pm: prevent further runtime-PM suspends for this
 * bus.  NOTE(review): the body after the local `bus` assignment was
 * dropped by extraction (presumably the line that neutralizes the idle
 * timer) -- confirm against the original file.
 */
2507 void dhdpcie_block_runtime_pm(dhd_pub_t
*dhdp
)
2509 dhd_bus_t
*bus
= dhdp
->bus
;
2513 bool dhdpcie_is_resume_done(dhd_pub_t
*dhdp
)
2515 dhd_bus_t
*bus
= dhdp
->bus
;
2516 return bus
->runtime_resume_done
;
2518 #endif /* DHD_PCIE_RUNTIMEPM */
2520 struct device
* dhd_bus_to_dev(dhd_bus_t
*bus
)
2522 struct pci_dev
*pdev
;