source: G950FXXS5DSI1
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] drivers/net/wireless/bcmdhd4361/dhd_pcie_linux.c
1/*
2 * Linux DHD Bus Module for PCIE
3 *
4 * Copyright (C) 1999-2019, Broadcom.
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
23 *
24 *
25 * <<Broadcom-WL-IPTag/Open:>>
26 *
27 * $Id: dhd_pcie_linux.c 800754 2019-01-23 08:38:54Z $
28 */
29
30/* include files */
31#include <typedefs.h>
32#include <bcmutils.h>
33#include <bcmdevs.h>
34#include <siutils.h>
35#include <hndsoc.h>
36#include <hndpmu.h>
37#include <sbchipc.h>
38#if defined(DHD_DEBUG)
39#include <hnd_armtrap.h>
40#include <hnd_cons.h>
41#endif /* defined(DHD_DEBUG) */
42#include <dngl_stats.h>
43#include <pcie_core.h>
44#include <dhd.h>
45#include <dhd_bus.h>
46#include <dhd_proto.h>
47#include <dhd_dbg.h>
48#include <dhdioctl.h>
49#include <bcmmsgbuf.h>
50#include <pcicfg.h>
51#include <dhd_pcie.h>
52#include <dhd_linux.h>
53#ifdef CONFIG_ARCH_MSM
54#if defined(CONFIG_PCI_MSM) || defined(CONFIG_ARCH_MSM8996)
55#include <linux/msm_pcie.h>
56#else
57#include <mach/msm_pcie.h>
58#endif /* CONFIG_PCI_MSM || CONFIG_ARCH_MSM8996 */
59#endif /* CONFIG_ARCH_MSM */
60
61#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
62#include <linux/pm_runtime.h>
63#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
64
65#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
66#ifndef AUTO_SUSPEND_TIMEOUT
67#define AUTO_SUSPEND_TIMEOUT 1000
68#endif /* AUTO_SUSPEND_TIMEOUT */
69#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
70
71#include <linux/irq.h>
72#ifdef USE_SMMU_ARCH_MSM
73#include <asm/dma-iommu.h>
74#include <linux/iommu.h>
75#include <linux/of.h>
76#include <linux/platform_device.h>
77#endif /* USE_SMMU_ARCH_MSM */
78
79#define PCI_CFG_RETRY 10
80#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
81#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
82
83/* user defined data structures */
84
85typedef struct dhd_pc_res {
86 uint32 bar0_size;
87 void* bar0_addr;
88 uint32 bar1_size;
89 void* bar1_addr;
90} pci_config_res, *pPci_config_res;
91
92typedef bool (*dhdpcie_cb_fn_t)(void *);
93
94typedef struct dhdpcie_info
95{
96 dhd_bus_t *bus;
97 osl_t *osh;
98 struct pci_dev *dev; /* pci device handle */
99 volatile char *regs; /* pci device memory va */
100 volatile char *tcm; /* pci device memory va */
101 uint32 tcm_size; /* pci device memory size */
102 struct pcos_info *pcos_info;
103 uint16 last_intrstatus; /* to cache intrstatus */
104 int irq;
105 char pciname[32];
106 struct pci_saved_state* default_state;
107 struct pci_saved_state* state;
108#ifdef BCMPCIE_OOB_HOST_WAKE
109 void *os_cxt; /* Pointer to per-OS private data */
110#endif /* BCMPCIE_OOB_HOST_WAKE */
111#ifdef DHD_WAKE_STATUS
112 spinlock_t pcie_lock;
113 unsigned int total_wake_count;
114 int pkt_wake;
115 int wake_irq;
116#endif /* DHD_WAKE_STATUS */
117#ifdef USE_SMMU_ARCH_MSM
118 void *smmu_cxt;
119#endif /* USE_SMMU_ARCH_MSM */
120} dhdpcie_info_t;
121
122struct pcos_info {
123 dhdpcie_info_t *pc;
124 spinlock_t lock;
125 wait_queue_head_t intr_wait_queue;
126 struct timer_list tuning_timer;
127 int tuning_timer_exp;
128 atomic_t timer_enab;
129 struct tasklet_struct tuning_tasklet;
130};
131
132#ifdef BCMPCIE_OOB_HOST_WAKE
133typedef struct dhdpcie_os_info {
134 int oob_irq_num; /* valid when hardware or software oob in use */
135 unsigned long oob_irq_flags; /* valid when hardware or software oob in use */
136 bool oob_irq_registered;
137 bool oob_irq_enabled;
138 bool oob_irq_wake_enabled;
139 spinlock_t oob_irq_spinlock;
140 void *dev; /* handle to the underlying device */
141} dhdpcie_os_info_t;
142static irqreturn_t wlan_oob_irq(int irq, void *data);
143#endif /* BCMPCIE_OOB_HOST_WAKE */
144
145#ifdef USE_SMMU_ARCH_MSM
146typedef struct dhdpcie_smmu_info {
147 struct dma_iommu_mapping *smmu_mapping;
148 dma_addr_t smmu_iova_start;
149 size_t smmu_iova_len;
150} dhdpcie_smmu_info_t;
151#endif /* USE_SMMU_ARCH_MSM */
152
153/* function declarations */
154static int __devinit
155dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
156static void __devexit
157dhdpcie_pci_remove(struct pci_dev *pdev);
158static int dhdpcie_init(struct pci_dev *pdev);
159static irqreturn_t dhdpcie_isr(int irq, void *arg);
160/* OS Routine functions for PCI suspend/resume */
161
162#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
163static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state, bool byint);
164#else
165static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state);
166#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
167static int dhdpcie_resume_host_dev(dhd_bus_t *bus);
168static int dhdpcie_suspend_host_dev(dhd_bus_t *bus);
169static int dhdpcie_resume_dev(struct pci_dev *dev);
170static int dhdpcie_suspend_dev(struct pci_dev *dev);
171#ifdef DHD_PCIE_RUNTIMEPM
172static int dhdpcie_pm_suspend(struct device *dev);
173static int dhdpcie_pm_prepare(struct device *dev);
174static int dhdpcie_pm_resume(struct device *dev);
175static void dhdpcie_pm_complete(struct device *dev);
176#else
177#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
178static int dhdpcie_pm_system_suspend_noirq(struct device * dev);
179static int dhdpcie_pm_system_resume_noirq(struct device * dev);
180#else
181static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
182static int dhdpcie_pci_resume(struct pci_dev *dev);
183#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
184#endif /* DHD_PCIE_RUNTIMEPM */
185
186#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
187static int dhdpcie_pm_runtime_suspend(struct device * dev);
188static int dhdpcie_pm_runtime_resume(struct device * dev);
189static int dhdpcie_pm_system_suspend_noirq(struct device * dev);
190static int dhdpcie_pm_system_resume_noirq(struct device * dev);
191#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
192
193static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = {
194 { vendor: 0x14e4,
195 device: PCI_ANY_ID,
196 subvendor: PCI_ANY_ID,
197 subdevice: PCI_ANY_ID,
198 class: PCI_CLASS_NETWORK_OTHER << 8,
199 class_mask: 0xffff00,
200 driver_data: 0,
201 },
202 { 0, 0, 0, 0, 0, 0, 0}
203};
204MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid);
205
206/* Power Management Hooks */
207#ifdef DHD_PCIE_RUNTIMEPM
208static const struct dev_pm_ops dhd_pcie_pm_ops = {
209 .prepare = dhdpcie_pm_prepare,
210 .suspend = dhdpcie_pm_suspend,
211 .resume = dhdpcie_pm_resume,
212 .complete = dhdpcie_pm_complete,
213};
214#endif /* DHD_PCIE_RUNTIMEPM */
215#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
216static const struct dev_pm_ops dhd_pcie_pm_ops = {
217 SET_RUNTIME_PM_OPS(dhdpcie_pm_runtime_suspend, dhdpcie_pm_runtime_resume, NULL)
218 .suspend_noirq = dhdpcie_pm_system_suspend_noirq,
219 .resume_noirq = dhdpcie_pm_system_resume_noirq
220};
221#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
222
223static struct pci_driver dhdpcie_driver = {
224 node: {&dhdpcie_driver.node, &dhdpcie_driver.node},
225 name: "pcieh",
226 id_table: dhdpcie_pci_devid,
227 probe: dhdpcie_pci_probe,
228 remove: dhdpcie_pci_remove,
229#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
230 save_state: NULL,
231#endif // endif
232#if defined(DHD_PCIE_RUNTIMEPM) || defined(DHD_PCIE_NATIVE_RUNTIMEPM)
233 .driver.pm = &dhd_pcie_pm_ops,
234#else
235 suspend: dhdpcie_pci_suspend,
236 resume: dhdpcie_pci_resume,
237#endif /* DHD_PCIE_RUNTIMEPM || DHD_PCIE_NATIVE_RUNTIMEPM */
238};
239
240int dhdpcie_init_succeeded = FALSE;
241
242#ifdef USE_SMMU_ARCH_MSM
243static int dhdpcie_smmu_init(struct pci_dev *pdev, void *smmu_cxt)
244{
245 struct dma_iommu_mapping *mapping;
246 struct device_node *root_node = NULL;
247 dhdpcie_smmu_info_t *smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
248 int smmu_iova_address[2];
249 char *wlan_node = "android,bcmdhd_wlan";
250 char *wlan_smmu_node = "wlan-smmu-iova-address";
251 int atomic_ctx = 1;
252 int s1_bypass = 1;
253 int ret = 0;
254
255 DHD_ERROR(("%s: SMMU initialize\n", __FUNCTION__));
256
257 root_node = of_find_compatible_node(NULL, NULL, wlan_node);
258 if (!root_node) {
259 WARN(1, "failed to get device node of BRCM WLAN\n");
260 return -ENODEV;
261 }
262
263 if (of_property_read_u32_array(root_node, wlan_smmu_node,
264 smmu_iova_address, 2) == 0) {
265 DHD_ERROR(("%s : get SMMU start address 0x%x, size 0x%x\n",
266 __FUNCTION__, smmu_iova_address[0], smmu_iova_address[1]));
267 smmu_info->smmu_iova_start = smmu_iova_address[0];
268 smmu_info->smmu_iova_len = smmu_iova_address[1];
269 } else {
270 printf("%s : can't get smmu iova address property\n",
271 __FUNCTION__);
272 return -ENODEV;
273 }
274
275 if (smmu_info->smmu_iova_len <= 0) {
276 DHD_ERROR(("%s: Invalid smmu iova len %d\n",
277 __FUNCTION__, (int)smmu_info->smmu_iova_len));
278 return -EINVAL;
279 }
280
281 DHD_ERROR(("%s : SMMU init start\n", __FUNCTION__));
282
283 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ||
284 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
285 DHD_ERROR(("%s: DMA set 64bit mask failed.\n", __FUNCTION__));
286 return -EINVAL;
287 }
288
289 mapping = arm_iommu_create_mapping(&platform_bus_type,
290 smmu_info->smmu_iova_start, smmu_info->smmu_iova_len);
291 if (IS_ERR(mapping)) {
292 ret = PTR_ERR(mapping);
293 DHD_ERROR(("%s: create mapping failed, err = %d\n",
294 __FUNCTION__, ret));
295 goto map_fail;
296 }
297
298 ret = iommu_domain_set_attr(mapping->domain,
299 DOMAIN_ATTR_ATOMIC, &atomic_ctx);
300 if (ret) {
301 DHD_ERROR(("%s: set atomic_ctx attribute failed, err = %d\n",
302 __FUNCTION__, ret));
303 goto set_attr_fail;
304 }
305
306 ret = iommu_domain_set_attr(mapping->domain,
307 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
308 if (ret < 0) {
309 DHD_ERROR(("%s: set s1_bypass attribute failed, err = %d\n",
310 __FUNCTION__, ret));
311 goto set_attr_fail;
312 }
313
314 ret = arm_iommu_attach_device(&pdev->dev, mapping);
315 if (ret) {
316 DHD_ERROR(("%s: attach device failed, err = %d\n",
317 __FUNCTION__, ret));
318 goto attach_fail;
319 }
320
321 smmu_info->smmu_mapping = mapping;
322
323 return ret;
324
325attach_fail:
326set_attr_fail:
327 arm_iommu_release_mapping(mapping);
328map_fail:
329 return ret;
330}
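/*
 * For reference: dhdpcie_smmu_init() above consumes a device-tree node
 * compatible with "android,bcmdhd_wlan" that carries a two-cell
 * "wlan-smmu-iova-address" property (IOVA window start, then length).
 * A minimal sketch of such a node, with hypothetical example values:
 *
 *	wlan_bcmdhd {
 *		compatible = "android,bcmdhd_wlan";
 *		wlan-smmu-iova-address = <0xa0000000 0x10000000>;
 *	};
 */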
331
332static void dhdpcie_smmu_remove(struct pci_dev *pdev, void *smmu_cxt)
333{
334 dhdpcie_smmu_info_t *smmu_info;
335
336 if (!smmu_cxt) {
337 return;
338 }
339
340 smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
341 if (smmu_info->smmu_mapping) {
342 arm_iommu_detach_device(&pdev->dev);
343 arm_iommu_release_mapping(smmu_info->smmu_mapping);
344 smmu_info->smmu_mapping = NULL;
345 }
346}
347#endif /* USE_SMMU_ARCH_MSM */
348
349void
350dhd_bus_aer_config(dhd_bus_t *bus)
351{
352 uint32 val;
353
354 DHD_ERROR(("%s: Configure AER registers for EP\n", __FUNCTION__));
355 val = dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
356 PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
357 if (val != (uint32)-1) {
358 val &= ~CORR_ERR_AE;
359 dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
360 PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
361 } else {
362 DHD_ERROR(("%s: Invalid EP's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
363 __FUNCTION__, val));
364 }
365
366 DHD_ERROR(("%s: Configure AER registers for RC\n", __FUNCTION__));
367 val = dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
368 PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
369 if (val != (uint32)-1) {
370 val &= ~CORR_ERR_AE;
371 dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
372 PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
373 } else {
374 DHD_ERROR(("%s: Invalid RC's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
375 __FUNCTION__, val));
376 }
377}
378
379#ifdef DHD_PCIE_RUNTIMEPM
380static int dhdpcie_pm_suspend(struct device *dev)
381{
382 int ret = 0;
383 struct pci_dev *pdev = to_pci_dev(dev);
384 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
385 dhd_bus_t *bus = NULL;
386 unsigned long flags;
387
388 if (pch) {
389 bus = pch->bus;
390 }
391 if (!bus) {
392 return ret;
393 }
394
395 DHD_GENERAL_LOCK(bus->dhd, flags);
396 if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
397 DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
398 __FUNCTION__, bus->dhd->dhd_bus_busy_state));
399 DHD_GENERAL_UNLOCK(bus->dhd, flags);
400 return -EBUSY;
401 }
402 DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
403 DHD_GENERAL_UNLOCK(bus->dhd, flags);
404
405 if (!bus->dhd->dongle_reset)
406 ret = dhdpcie_set_suspend_resume(bus, TRUE);
407
408 DHD_GENERAL_LOCK(bus->dhd, flags);
409 DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
410 dhd_os_busbusy_wake(bus->dhd);
411 DHD_GENERAL_UNLOCK(bus->dhd, flags);
412
413 return ret;
414
415}
416
417static int dhdpcie_pm_prepare(struct device *dev)
418{
419 struct pci_dev *pdev = to_pci_dev(dev);
420 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
421 dhd_bus_t *bus = NULL;
422
423 if (!pch || !pch->bus) {
424 return 0;
425 }
426
427 bus = pch->bus;
428 DHD_DISABLE_RUNTIME_PM(bus->dhd);
429 bus->chk_pm = TRUE;
430
431 return 0;
432}
433
434static int dhdpcie_pm_resume(struct device *dev)
435{
436 int ret = 0;
437 struct pci_dev *pdev = to_pci_dev(dev);
438 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
439 dhd_bus_t *bus = NULL;
440 unsigned long flags;
441
442 if (pch) {
443 bus = pch->bus;
444 }
445 if (!bus) {
446 return ret;
447 }
448
449 DHD_GENERAL_LOCK(bus->dhd, flags);
450 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
451 DHD_GENERAL_UNLOCK(bus->dhd, flags);
452
453 if (!bus->dhd->dongle_reset)
454 ret = dhdpcie_set_suspend_resume(bus, FALSE);
455
456 DHD_GENERAL_LOCK(bus->dhd, flags);
457 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
458 dhd_os_busbusy_wake(bus->dhd);
459 DHD_GENERAL_UNLOCK(bus->dhd, flags);
460
461 return ret;
462}
463
464static void dhdpcie_pm_complete(struct device *dev)
465{
466 struct pci_dev *pdev = to_pci_dev(dev);
467 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
468 dhd_bus_t *bus = NULL;
469
470 if (!pch || !pch->bus) {
471 return;
472 }
473
474 bus = pch->bus;
475 DHD_ENABLE_RUNTIME_PM(bus->dhd);
476 bus->chk_pm = FALSE;
477
478 return;
479}
480#else
481static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state)
482{
483 int ret = 0;
484 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
485 dhd_bus_t *bus = NULL;
486 unsigned long flags;
487
488 if (pch) {
489 bus = pch->bus;
490 }
491 if (!bus) {
492 return ret;
493 }
494
495 BCM_REFERENCE(state);
496
497 DHD_GENERAL_LOCK(bus->dhd, flags);
498 if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
499 DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
500 __FUNCTION__, bus->dhd->dhd_bus_busy_state));
501 DHD_GENERAL_UNLOCK(bus->dhd, flags);
502 return -EBUSY;
503 }
504 DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
505 DHD_GENERAL_UNLOCK(bus->dhd, flags);
506
507 if (!bus->dhd->dongle_reset)
508 ret = dhdpcie_set_suspend_resume(bus, TRUE);
509
510 DHD_GENERAL_LOCK(bus->dhd, flags);
511 DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
512 dhd_os_busbusy_wake(bus->dhd);
513 DHD_GENERAL_UNLOCK(bus->dhd, flags);
514
515 return ret;
516}
517
518static int dhdpcie_pci_resume(struct pci_dev *pdev)
519{
520 int ret = 0;
521 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
522 dhd_bus_t *bus = NULL;
523 unsigned long flags;
524
525 if (pch) {
526 bus = pch->bus;
527 }
528 if (!bus) {
529 return ret;
530 }
531
532 DHD_GENERAL_LOCK(bus->dhd, flags);
533 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
534 DHD_GENERAL_UNLOCK(bus->dhd, flags);
535
536 if (!bus->dhd->dongle_reset)
537 ret = dhdpcie_set_suspend_resume(bus, FALSE);
538
539 DHD_GENERAL_LOCK(bus->dhd, flags);
540 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
541 dhd_os_busbusy_wake(bus->dhd);
542 DHD_GENERAL_UNLOCK(bus->dhd, flags);
543
544 return ret;
545}
546
547#endif /* DHD_PCIE_RUNTIMEPM */
548#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
549static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state, bool byint)
550#else
551static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state)
552#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
553{
554 int ret = 0;
555
556 ASSERT(bus && !bus->dhd->dongle_reset);
557
558#ifdef DHD_PCIE_RUNTIMEPM
559 /* if a wakelock is held during suspend, fail the request */
560 if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) {
561 return -EBUSY;
562 }
563 mutex_lock(&bus->pm_lock);
564#endif /* DHD_PCIE_RUNTIMEPM */
565
566 /* When firmware is not loaded, do only the */
567 /* PCI bus suspend/resume. */
568 if (bus->dhd->busstate == DHD_BUS_DOWN) {
569 ret = dhdpcie_pci_suspend_resume(bus, state);
570#ifdef DHD_PCIE_RUNTIMEPM
571 mutex_unlock(&bus->pm_lock);
572#endif /* DHD_PCIE_RUNTIMEPM */
573 return ret;
574 }
575#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
576 ret = dhdpcie_bus_suspend(bus, state, byint);
577#else
578 ret = dhdpcie_bus_suspend(bus, state);
579#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
580
581#ifdef DHD_PCIE_RUNTIMEPM
582 mutex_unlock(&bus->pm_lock);
583#endif /* DHD_PCIE_RUNTIMEPM */
584
585 return ret;
586}
587
588#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
589static int dhdpcie_pm_runtime_suspend(struct device * dev)
590{
591 struct pci_dev *pdev = to_pci_dev(dev);
592 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
593 dhd_bus_t *bus = NULL;
594 int ret = 0;
595
596 if (!pch)
597 return -EBUSY;
598
599 bus = pch->bus;
600
601 DHD_RPM(("%s Enter\n", __FUNCTION__));
602
603 if (atomic_read(&bus->dhd->block_bus))
604 return -EHOSTDOWN;
605
606 dhd_netif_stop_queue(bus);
607 atomic_set(&bus->dhd->block_bus, TRUE);
608
609 if (dhdpcie_set_suspend_resume(pdev, TRUE, TRUE)) {
610 pm_runtime_mark_last_busy(dev);
611 ret = -EAGAIN;
612 }
613
614 atomic_set(&bus->dhd->block_bus, FALSE);
615 dhd_bus_start_queue(bus);
616
617 return ret;
618}
619
620static int dhdpcie_pm_runtime_resume(struct device * dev)
621{
622 struct pci_dev *pdev = to_pci_dev(dev);
623 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
624 dhd_bus_t *bus = pch->bus;
625
626 DHD_RPM(("%s Enter\n", __FUNCTION__));
627
628 if (atomic_read(&bus->dhd->block_bus))
629 return -EHOSTDOWN;
630
631 if (dhdpcie_set_suspend_resume(pdev, FALSE, TRUE))
632 return -EAGAIN;
633
634 return 0;
635}
636
637static int dhdpcie_pm_system_suspend_noirq(struct device * dev)
638{
639 struct pci_dev *pdev = to_pci_dev(dev);
640 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
641 dhd_bus_t *bus = NULL;
642 int ret;
643
644 DHD_RPM(("%s Enter\n", __FUNCTION__));
645
646 if (!pch)
647 return -EBUSY;
648
649 bus = pch->bus;
650
651 if (atomic_read(&bus->dhd->block_bus))
652 return -EHOSTDOWN;
653
654 dhd_netif_stop_queue(bus);
655 atomic_set(&bus->dhd->block_bus, TRUE);
656
657 ret = dhdpcie_set_suspend_resume(pdev, TRUE, FALSE);
658
659 if (ret) {
660 dhd_bus_start_queue(bus);
661 atomic_set(&bus->dhd->block_bus, FALSE);
662 }
663
664 return ret;
665}
666
667static int dhdpcie_pm_system_resume_noirq(struct device * dev)
668{
669 struct pci_dev *pdev = to_pci_dev(dev);
670 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
671 dhd_bus_t *bus = NULL;
672 int ret;
673
674 if (!pch)
675 return -EBUSY;
676
677 bus = pch->bus;
678
679 DHD_RPM(("%s Enter\n", __FUNCTION__));
680
681 ret = dhdpcie_set_suspend_resume(pdev, FALSE, FALSE);
682
683 atomic_set(&bus->dhd->block_bus, FALSE);
684 dhd_bus_start_queue(bus);
685 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
686
687 return ret;
688}
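/*
 * Minimal usage sketch (hypothetical helper, not part of the driver): with
 * the autosuspend delay armed in dhdpcie_init(), code that must touch the
 * bus outside the normal data path would bracket the access with the
 * standard runtime-PM calls so the device is resumed first and the
 * autosuspend timer is re-armed afterwards.
 */
static inline int
dhdpcie_runtime_bus_access_sketch(dhd_bus_t *bus)
{
	struct device *dev = &bus->dev->dev;
	int err;

	err = pm_runtime_get_sync(dev);		/* resume the EP if suspended */
	if (err < 0) {
		pm_runtime_put_noidle(dev);	/* balance the usage count */
		return err;
	}

	/* ... access device registers / message rings here ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);
	return 0;
}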
689#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
690
691#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
692extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp);
693#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
694
695static int dhdpcie_suspend_dev(struct pci_dev *dev)
696{
697 int ret;
698#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
699 dhdpcie_info_t *pch = pci_get_drvdata(dev);
700 dhd_bus_t *bus = pch->bus;
701
702 if (bus->is_linkdown) {
703 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
704 return BCME_ERROR;
705 }
706#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
707 DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
708#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
709 dhd_dpc_tasklet_kill(bus->dhd);
710#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
711 pci_save_state(dev);
712#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
713 pch->state = pci_store_saved_state(dev);
714#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
715 pci_enable_wake(dev, PCI_D0, TRUE);
716#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
717 if (pci_is_enabled(dev))
718#endif // endif
719 pci_disable_device(dev);
720
721 ret = pci_set_power_state(dev, PCI_D3hot);
722 if (ret) {
723 DHD_ERROR(("%s: pci_set_power_state error %d\n",
724 __FUNCTION__, ret));
725 }
726 dev->state_saved = FALSE;
727 return ret;
728}
729
730#ifdef DHD_WAKE_STATUS
731int bcmpcie_get_total_wake(struct dhd_bus *bus)
732{
733 dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
734
735 return pch->total_wake_count;
736}
737
738int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag)
739{
740 dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
741 unsigned long flags;
742 int ret;
743
744 spin_lock_irqsave(&pch->pcie_lock, flags);
745
746 ret = pch->pkt_wake;
747 pch->total_wake_count += flag;
748 pch->pkt_wake = flag;
749
750 spin_unlock_irqrestore(&pch->pcie_lock, flags);
751 return ret;
752}
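/*
 * Usage sketch (hypothetical caller): bcmpcie_set_get_wake() swaps the
 * per-wake flag under pcie_lock, so passing flag = 0 reads and clears the
 * pending packet-wake indication in one atomic step, counting each host
 * wake exactly once.
 */
static inline int
dhdpcie_consume_pkt_wake_sketch(struct dhd_bus *bus)
{
	return bcmpcie_set_get_wake(bus, 0);	/* returns previous flag, then clears it */
}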
753#endif /* DHD_WAKE_STATUS */
754
755static int dhdpcie_resume_dev(struct pci_dev *dev)
756{
757 int err = 0;
758#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
759 dhdpcie_info_t *pch = pci_get_drvdata(dev);
760 pci_load_and_free_saved_state(dev, &pch->state);
761#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
762 DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
763 dev->state_saved = TRUE;
764 pci_restore_state(dev);
765 err = pci_enable_device(dev);
766 if (err) {
767 printf("%s:pci_enable_device error %d \n", __FUNCTION__, err);
768 goto out;
769 }
770 pci_set_master(dev);
771 err = pci_set_power_state(dev, PCI_D0);
772 if (err) {
773 printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
774 goto out;
775 }
776
777out:
778 return err;
779}
780
781static int dhdpcie_resume_host_dev(dhd_bus_t *bus)
782{
783 int bcmerror = 0;
784#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
785 bcmerror = exynos_pcie_pm_resume(SAMSUNG_PCIE_CH_NUM);
786#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
787#ifdef CONFIG_ARCH_MSM
788 bcmerror = dhdpcie_start_host_pcieclock(bus);
789#endif /* CONFIG_ARCH_MSM */
790#ifdef CONFIG_ARCH_TEGRA
791 bcmerror = tegra_pcie_pm_resume();
792#endif /* CONFIG_ARCH_TEGRA */
793 if (bcmerror < 0) {
794 DHD_ERROR(("%s: PCIe RC resume failed!!! (%d)\n",
795 __FUNCTION__, bcmerror));
796 bus->is_linkdown = 1;
797#ifdef SUPPORT_LINKDOWN_RECOVERY
798#ifdef CONFIG_ARCH_MSM
799 bus->no_cfg_restore = 1;
800#endif /* CONFIG_ARCH_MSM */
801#endif /* SUPPORT_LINKDOWN_RECOVERY */
802 }
803
804 return bcmerror;
805}
806
807static int dhdpcie_suspend_host_dev(dhd_bus_t *bus)
808{
809 int bcmerror = 0;
810#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
811 if (bus->rc_dev) {
812 pci_save_state(bus->rc_dev);
813 } else {
814 DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
815 __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
816 }
817 exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM);
818#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
819#ifdef CONFIG_ARCH_MSM
820 bcmerror = dhdpcie_stop_host_pcieclock(bus);
821#endif /* CONFIG_ARCH_MSM */
822#ifdef CONFIG_ARCH_TEGRA
823 bcmerror = tegra_pcie_pm_suspend();
824#endif /* CONFIG_ARCH_TEGRA */
825 return bcmerror;
826}
827
828uint32
829dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset)
830{
831 uint val = -1; /* Initialise to 0xffffffff */
832 if (bus->rc_dev) {
833 pci_read_config_dword(bus->rc_dev, offset, &val);
834 OSL_DELAY(100);
835 } else {
836 DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
837 __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
838 }
839 DHD_ERROR(("%s: RC %x:%x offset 0x%x val 0x%x\n",
840 __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, offset, val));
841 return (val);
842}
843
844/*
845 * Reads/writes the value of a capability register
846 * from the given CAP_ID section of the PCI Root Port.
847 *
848 * Arguments
849 * @pdev pci_dev handle of the device to access
850 * @cap Capability or Extended Capability ID to get
851 * @offset offset of Register to Read
852 * @is_ext TRUE if @cap is given for Extended Capability
853 * @is_write is set to TRUE to indicate write
854 * @writeval value to write
855 *
856 * Return Value
857 * Returns 0xffffffff on error.
858 * On write success returns BCME_OK (0);
859 * on read success returns the value of the register requested.
860 * Note: the caller should ensure a valid capability ID and Ext. Capability ID.
861 */
862
863uint32
864dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
865 uint32 writeval)
866{
867 int cap_ptr = 0;
868 uint32 ret = -1;
869 uint32 readval;
870
871 if (!(pdev)) {
872 DHD_ERROR(("%s: pdev is NULL\n", __FUNCTION__));
873 return ret;
874 }
875
876 /* Find Capability offset */
877 if (is_ext) {
878 /* removing max EXT_CAP_ID check as the
879 * Linux kernel definition's max value is not yet updated per the spec
880 */
881 cap_ptr = pci_find_ext_capability(pdev, cap);
882
883 } else {
884 /* removing max PCI_CAP_ID_MAX check as
885 * previous kernel versions don't have this definition
886 */
887 cap_ptr = pci_find_capability(pdev, cap);
888 }
889
890 /* Return if capability with given ID not found */
891 if (cap_ptr == 0) {
892 DHD_ERROR(("%s: PCI Cap(0x%02x) not supported.\n",
893 __FUNCTION__, cap));
894 return BCME_ERROR;
895 }
896
897 if (is_write) {
898 pci_write_config_dword(pdev, (cap_ptr + offset), writeval);
899 ret = BCME_OK;
900
901 } else {
902
903 pci_read_config_dword(pdev, (cap_ptr + offset), &readval);
904 ret = readval;
905 }
906
907 return ret;
908}
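/*
 * Usage sketch (hypothetical helper): the read path of dhdpcie_access_cap()
 * overloads its return value, so a register that legitimately reads back as
 * 0xffffffff is indistinguishable from "capability not found". Callers must
 * therefore treat (uint32)-1 as an error before using the data, as
 * dhd_bus_aer_config() does.
 */
static inline uint32
dhdpcie_read_cap_reg_sketch(struct pci_dev *pdev, int cap, uint offset, bool is_ext)
{
	uint32 val = dhdpcie_access_cap(pdev, cap, offset, is_ext, FALSE, 0);

	if (val == (uint32)-1) {
		DHD_ERROR(("%s: cap 0x%x read failed\n", __FUNCTION__, cap));
	}
	return val;
}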
909
910uint32
911dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
912 uint32 writeval)
913{
914 if (!(bus->rc_dev)) {
915 DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
916 __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
917 return BCME_ERROR;
918 }
919
920 return dhdpcie_access_cap(bus->rc_dev, cap, offset, is_ext, is_write, writeval);
921}
922
923uint32
924dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
925 uint32 writeval)
926{
927 if (!(bus->dev)) {
928 DHD_ERROR(("%s: EP handle is NULL\n", __FUNCTION__));
929 return BCME_ERROR;
930 }
931
932 return dhdpcie_access_cap(bus->dev, cap, offset, is_ext, is_write, writeval);
933}
934
935/* API wrapper to read the Root Port link capability.
936 * Returns 2 = GEN2, 1 = GEN1, or BCME_ERROR when the link capability is not found.
937 */
938
939uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus)
940{
941 uint32 linkcap = -1;
942 linkcap = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP,
943 PCIE_CAP_LINKCAP_OFFSET, FALSE, FALSE, 0);
944 linkcap &= PCIE_CAP_LINKCAP_LNKSPEED_MASK;
945 return linkcap;
946}
947
948int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state)
949{
950 int rc;
951
952 struct pci_dev *dev = bus->dev;
953
954 if (state) {
955#if !defined(BCMPCIE_OOB_HOST_WAKE)
956 dhdpcie_pme_active(bus->osh, state);
957#endif // endif
958 rc = dhdpcie_suspend_dev(dev);
959 if (!rc) {
960 dhdpcie_suspend_host_dev(bus);
961 }
962 } else {
963 rc = dhdpcie_resume_host_dev(bus);
964 if (!rc) {
965 rc = dhdpcie_resume_dev(dev);
966 if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
967 /* reinit CTO configuration
968 * because cfg space got reset at D3 (PERST)
969 */
970 dhdpcie_cto_init(bus, bus->cto_enable);
971 }
972 if (bus->sih->buscorerev == 66) {
973 dhdpcie_ssreset_dis_enum_rst(bus);
974 }
975#if !defined(BCMPCIE_OOB_HOST_WAKE)
976 dhdpcie_pme_active(bus->osh, state);
977#endif // endif
978 }
979#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
980#if defined(DHD_HANG_SEND_UP_TEST)
981 if (bus->is_linkdown ||
982 bus->dhd->req_hang_type == HANG_REASON_PCIE_RC_LINK_UP_FAIL) {
983#else /* DHD_HANG_SEND_UP_TEST */
984 if (bus->is_linkdown) {
985#endif /* DHD_HANG_SEND_UP_TEST */
986 bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL;
987 dhd_os_send_hang_message(bus->dhd);
988 }
989#endif // endif
990 }
991 return rc;
992}
993
994#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
995static int dhdpcie_device_scan(struct device *dev, void *data)
996{
997 struct pci_dev *pcidev;
998 int *cnt = data;
999
1000#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1001#pragma GCC diagnostic push
1002#pragma GCC diagnostic ignored "-Wcast-qual"
1003#endif // endif
1004 pcidev = container_of(dev, struct pci_dev, dev);
1005#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1006#pragma GCC diagnostic pop
1007#endif // endif
1008 if (pcidev->vendor != 0x14e4)
1009 return 0;
1010
1011 DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device));
1012 *cnt += 1;
1013 if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name))
1014 DHD_ERROR(("Broadcom PCI Device 0x%04x has allocated with driver %s\n",
1015 pcidev->device, pcidev->driver->name));
1016
1017 return 0;
1018}
1019#endif /* LINUX_VERSION >= 2.6.0 */
1020
1021int
1022dhdpcie_bus_register(void)
1023{
1024 int error = 0;
1025
1026#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
1027 if (!(error = pci_module_init(&dhdpcie_driver)))
1028 return 0;
1029
1030 DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
1031#else
1032 if (!(error = pci_register_driver(&dhdpcie_driver))) {
1033 bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
1034 if (!error) {
1035 DHD_ERROR(("No Broadcom PCI device enumerated!\n"));
1036 } else if (!dhdpcie_init_succeeded) {
1037 DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__));
1038 } else {
1039 return 0;
1040 }
1041
1042 pci_unregister_driver(&dhdpcie_driver);
1043 error = BCME_ERROR;
1044 }
1045#endif /* LINUX_VERSION < 2.6.0 */
1046
1047 return error;
1048}
1049
1050void
1051dhdpcie_bus_unregister(void)
1052{
1053 pci_unregister_driver(&dhdpcie_driver);
1054}
1055
1056int __devinit
1057dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1058{
1059
1060 if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) {
1061 DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
1062 return -ENODEV;
1063 }
1064 printf("PCI_PROBE: bus %X, slot %X, vendor %X, device %X "
1065 "(good PCI location)\n", pdev->bus->number,
1066 PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device);
1067
1068 if (dhdpcie_init (pdev)) {
1069 DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
1070 return -ENODEV;
1071 }
1072
1073#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1074 /*
1075 Since the MSM PCIe RC device usage count has already been incremented
1076 by 2 before dhdpcie_pci_probe() is called, we have to call
1077 pm_runtime_put_noidle() twice so that the count starts from zero.
1078 */
1079
1080 pm_runtime_put_noidle(&pdev->dev);
1081 pm_runtime_put_noidle(&pdev->dev);
1082 pm_runtime_set_suspended(&pdev->dev);
1083#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1084
1085#ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND
1086 /* disable async suspend */
1087 device_disable_async_suspend(&pdev->dev);
1088#endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */
1089
1090 DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
1091 return 0;
1092}
1093
1094int
1095dhdpcie_detach(dhdpcie_info_t *pch)
1096{
1097 if (pch) {
1098#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1099 if (!dhd_download_fw_on_driverload) {
1100 pci_load_and_free_saved_state(pch->dev, &pch->default_state);
1101 }
1102#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1103 MFREE(pch->osh, pch, sizeof(dhdpcie_info_t));
1104 }
1105 return 0;
1106}
1107
1108void __devexit
1109dhdpcie_pci_remove(struct pci_dev *pdev)
1110{
1111 osl_t *osh = NULL;
1112 dhdpcie_info_t *pch = NULL;
1113 dhd_bus_t *bus = NULL;
1114
1115 DHD_TRACE(("%s Enter\n", __FUNCTION__));
1116 pch = pci_get_drvdata(pdev);
1117 bus = pch->bus;
1118 osh = pch->osh;
1119
1120#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1121 pm_runtime_get_noresume(&pdev->dev);
1122 pm_runtime_get_noresume(&pdev->dev);
1123#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1124
1125 if (bus) {
1126#ifdef SUPPORT_LINKDOWN_RECOVERY
1127#ifdef CONFIG_ARCH_MSM
1128 msm_pcie_deregister_event(&bus->pcie_event);
1129#endif /* CONFIG_ARCH_MSM */
1130#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1131#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
1132 defined(CONFIG_SOC_EXYNOS9810)
1133 exynos_pcie_deregister_event(&bus->pcie_event);
1134#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 ||
1135 * CONFIG_SOC_EXYNOS9810
1136 */
1137#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1138#endif /* SUPPORT_LINKDOWN_RECOVERY */
1139
1140 bus->rc_dev = NULL;
1141
1142 dhdpcie_bus_release(bus);
1143 }
1144#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
1145 if (pci_is_enabled(pdev))
1146#endif // endif
1147 pci_disable_device(pdev);
1148#ifdef BCMPCIE_OOB_HOST_WAKE
1149 /* pcie os info detach */
1150 MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t));
1151#endif /* BCMPCIE_OOB_HOST_WAKE */
1152#ifdef USE_SMMU_ARCH_MSM
1153 /* smmu info detach */
1154 dhdpcie_smmu_remove(pdev, pch->smmu_cxt);
1155 MFREE(osh, pch->smmu_cxt, sizeof(dhdpcie_smmu_info_t));
1156#endif /* USE_SMMU_ARCH_MSM */
1157 /* pcie info detach */
1158 dhdpcie_detach(pch);
1159 /* osl detach */
1160 osl_detach(osh);
1161
1162 dhdpcie_init_succeeded = FALSE;
1163
1164 DHD_TRACE(("%s Exit\n", __FUNCTION__));
1165
1166 return;
1167}
1168
1169/* Enable Linux Msi */
1170int
1171dhdpcie_enable_msi(struct pci_dev *pdev, unsigned int min_vecs, unsigned int max_vecs)
1172{
1173#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
1174 return pci_alloc_irq_vectors(pdev, min_vecs, max_vecs, PCI_IRQ_MSI);
1175#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
1176 return pci_enable_msi_range(pdev, min_vecs, max_vecs);
1177#else
1178 return pci_enable_msi_block(pdev, max_vecs);
1179#endif // endif
1180}
1181
1182/* Disable Linux Msi */
1183void
1184dhdpcie_disable_msi(struct pci_dev *pdev)
1185{
1186#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
1187 pci_free_irq_vectors(pdev);
1188#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
1189 pci_disable_msi(pdev);
1190#else
1191 pci_disable_msi(pdev);
1192#endif // endif
1193 return;
1194}
1195
1196/* Request Linux irq */
1197int
1198dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info)
1199{
1200 dhd_bus_t *bus = dhdpcie_info->bus;
1201 struct pci_dev *pdev = dhdpcie_info->bus->dev;
1202 int host_irq_disabled;
1203
1204 if (!bus->irq_registered) {
1205 snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
1206 "dhdpcie:%s", pci_name(pdev));
1207
1208 if (bus->d2h_intr_method == PCIE_MSI) {
1209 if (dhdpcie_enable_msi(pdev, 1, 1) < 0) {
1210 DHD_ERROR(("%s: dhdpcie_enable_msi() failed\n", __FUNCTION__));
1211 dhdpcie_disable_msi(pdev);
1212 bus->d2h_intr_method = PCIE_INTX;
1213 }
1214 }
1215
1216 if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
1217 dhdpcie_info->pciname, bus) < 0) {
1218 DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
1219 if (bus->d2h_intr_method == PCIE_MSI) {
1220 dhdpcie_disable_msi(pdev);
1221 }
1222 return -1;
1223 }
1224 else {
1225 bus->irq_registered = TRUE;
1226 }
1227 } else {
1228 DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__));
1229 }
1230
1231 host_irq_disabled = dhdpcie_irq_disabled(bus);
1232 if (host_irq_disabled) {
1233 DHD_ERROR(("%s: PCIe IRQ was disabled(%d), so, enabled it again\n",
1234 __FUNCTION__, host_irq_disabled));
1235 dhdpcie_enable_irq(bus);
1236 }
1237
1238 DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));
1239
1240 return 0; /* SUCCESS */
1241}
1242
1243/**
1244 * dhdpcie_get_pcieirq - return pcie irq number to linux-dhd
1245 */
1246int
1247dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq)
1248{
1249 struct pci_dev *pdev = bus->dev;
1250
1251 if (!pdev) {
1252 DHD_ERROR(("%s : bus->dev is NULL\n", __FUNCTION__));
1253 return -ENODEV;
1254 }
1255
1256 *irq = pdev->irq;
1257
1258 return 0; /* SUCCESS */
1259}
1260
1261#ifdef CONFIG_PHYS_ADDR_T_64BIT
1262#define PRINTF_RESOURCE "0x%016llx"
1263#else
1264#define PRINTF_RESOURCE "0x%08x"
1265#endif // endif
1266
1267#ifdef EXYNOS_PCIE_MODULE_PATCH
1268#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1269extern struct pci_saved_state *bcm_pcie_default_state;
1270#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1271#endif /* EXYNOS_PCIE_MODULE_PATCH */
1272
1273/*
1274
1275Name: dhdpcie_get_resource
1276
1277Parameters:
1278
12791: dhdpcie_info -- dhdpcie_info_t carrying the pci device handle (dev) and
1280   receiving the mapped BAR addresses and sizes
1281
1282Return value:
1283
1284int - 0 on SUCCESS, -1 on FAILURE
1285
1286Description:
1287Accesses PCI configuration space, retrieves the PCI-allocated resources, and updates them in the resource structure.
1288
1289 */
1290int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info)
1291{
1292 phys_addr_t bar0_addr, bar1_addr;
1293 ulong bar1_size;
1294 struct pci_dev *pdev = NULL;
1295 pdev = dhdpcie_info->dev;
1296#ifdef EXYNOS_PCIE_MODULE_PATCH
1297#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1298 if (bcm_pcie_default_state) {
1299 pci_load_saved_state(pdev, bcm_pcie_default_state);
1300 pci_restore_state(pdev);
1301 }
1302#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1303#endif /* EXYNOS_PCIE_MODULE_PATCH */
1304 do {
1305 if (pci_enable_device(pdev)) {
1306 printf("%s: Cannot enable PCI device\n", __FUNCTION__);
1307 break;
1308 }
1309 pci_set_master(pdev);
1310 bar0_addr = pci_resource_start(pdev, 0); /* Bar-0 mapped address */
1311 bar1_addr = pci_resource_start(pdev, 2); /* Bar-1 mapped address */
1312
1313 /* read Bar-1 mapped memory range */
1314 bar1_size = pci_resource_len(pdev, 2);
1315
1316 if ((bar1_size == 0) || (bar1_addr == 0)) {
1317 printf("%s: BAR1 not enabled for this device, size(%ld),"
1318 " addr(0x"PRINTF_RESOURCE")\n",
1319 __FUNCTION__, bar1_size, bar1_addr);
1320 goto err;
1321 }
1322
1323 dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
1324 dhdpcie_info->tcm_size =
1325 (bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
1326 dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
1327
1328 if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
1329 DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
1330 break;
1331 }
1332#ifdef EXYNOS_PCIE_MODULE_PATCH
1333#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1334 if (bcm_pcie_default_state == NULL) {
1335 pci_save_state(pdev);
1336 bcm_pcie_default_state = pci_store_saved_state(pdev);
1337 }
1338#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1339#endif /* EXYNOS_PCIE_MODULE_PATCH */
1340
1341#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1342 /* Back up the PCIe configuration so that the Wi-Fi on/off process
1343 * can be used when the driver is built in
1344 */
1345 pci_save_state(pdev);
1346 dhdpcie_info->default_state = pci_store_saved_state(pdev);
1347
1348 if (dhdpcie_info->default_state == NULL) {
1349 DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
1350 __FUNCTION__));
1351 REG_UNMAP(dhdpcie_info->regs);
1352 REG_UNMAP(dhdpcie_info->tcm);
1353 pci_disable_device(pdev);
1354 break;
1355 }
1356#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1357
1358 DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
1359 __FUNCTION__, dhdpcie_info->regs, bar0_addr));
1360 DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
1361 __FUNCTION__, dhdpcie_info->tcm, bar1_addr));
1362
1363 return 0; /* SUCCESS */
1364 } while (0);
1365err:
1366 return -1; /* FAILURE */
1367}
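/*
 * Equivalence sketch: on Linux the REG_MAP() calls above typically reduce
 * to ioremap()-family mappings of the BAR physical ranges (an assumption
 * about the OSL, not a statement about this tree). The raw kernel-API form
 * of the BAR1/TCM mapping is roughly:
 */
static void __iomem *
dhdpcie_map_bar1_sketch(struct pci_dev *pdev, ulong *sizep)
{
	phys_addr_t start = pci_resource_start(pdev, 2);	/* BAR1 */
	ulong len = pci_resource_len(pdev, 2);

	if ((start == 0) || (len == 0)) {
		return NULL;	/* BAR1 not assigned by the root complex */
	}

	*sizep = len;
	return ioremap_nocache(start, len);	/* uncached MMIO mapping */
}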
1368
1369int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info)
1370{
1371
1372 DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
1373
1374 do {
1375 /* define it here only!! */
1376 if (dhdpcie_get_resource (dhdpcie_info)) {
1377 DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__));
1378 break;
1379 }
1380 DHD_TRACE(("%s:Exit - SUCCESS \n",
1381 __FUNCTION__));
1382
1383 return 0; /* SUCCESS */
1384
1385 } while (0);
1386
1387 DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
1388
1389 return -1; /* FAILURE */
1390
1391}
1392
1393void dhdpcie_dump_resource(dhd_bus_t *bus)
1394{
1395 dhdpcie_info_t *pch;
1396
1397 if (bus == NULL) {
1398 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
1399 return;
1400 }
1401
1402 if (bus->dev == NULL) {
1403 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
1404 return;
1405 }
1406
1407 pch = pci_get_drvdata(bus->dev);
1408 if (pch == NULL) {
1409 DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
1410 return;
1411 }
1412
1413 /* BAR0 */
1414 DHD_ERROR(("%s: BAR0(VA): 0x%pK, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n",
1415 __FUNCTION__, pch->regs, pci_resource_start(bus->dev, 0),
1416 DONGLE_REG_MAP_SIZE));
1417
1418 /* BAR1 */
1419 DHD_ERROR(("%s: BAR1(VA): 0x%pK, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n",
1420 __FUNCTION__, pch->tcm, pci_resource_start(bus->dev, 2),
1421 pch->tcm_size));
1422}
1423
1424#ifdef SUPPORT_LINKDOWN_RECOVERY
1425#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
1426 (defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
1427 defined(CONFIG_SOC_EXYNOS9810)))
1428void dhdpcie_linkdown_cb(struct_pcie_notify *noti)
1429{
1430 struct pci_dev *pdev = (struct pci_dev *)noti->user;
1431 dhdpcie_info_t *pch = NULL;
1432
1433 if (pdev) {
1434 pch = pci_get_drvdata(pdev);
1435 if (pch) {
1436 dhd_bus_t *bus = pch->bus;
1437 if (bus) {
1438 dhd_pub_t *dhd = bus->dhd;
1439 if (dhd) {
1440 DHD_ERROR(("%s: Event HANG send up "
1441 "due to PCIe linkdown\n",
1442 __FUNCTION__));
1443#ifdef CONFIG_ARCH_MSM
1444 bus->no_cfg_restore = 1;
1445#endif /* CONFIG_ARCH_MSM */
1446 bus->is_linkdown = 1;
1447 DHD_OS_WAKE_LOCK(dhd);
1448 dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
1449 dhd_os_send_hang_message(dhd);
1450 }
1451 }
1452 }
1453 }
1454
1455}
1456#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY &&
1457 * (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810))
1458 */
1459#endif /* SUPPORT_LINKDOWN_RECOVERY */
1460
1461int dhdpcie_init(struct pci_dev *pdev)
1462{
1463
1464 osl_t *osh = NULL;
1465 dhd_bus_t *bus = NULL;
1466 dhdpcie_info_t *dhdpcie_info = NULL;
1467 wifi_adapter_info_t *adapter = NULL;
1468#ifdef BCMPCIE_OOB_HOST_WAKE
1469 dhdpcie_os_info_t *dhdpcie_osinfo = NULL;
1470#endif /* BCMPCIE_OOB_HOST_WAKE */
1471#ifdef USE_SMMU_ARCH_MSM
1472 dhdpcie_smmu_info_t *dhdpcie_smmu_info = NULL;
1473#endif /* USE_SMMU_ARCH_MSM */
1474 int ret = 0;
1475
1476 do {
1477 /* osl attach */
1478 if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
1479 DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__));
1480 break;
1481 }
1482
1483 /* initialize static buffer */
1484 adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number,
1485 PCI_SLOT(pdev->devfn));
1486 if (adapter != NULL)
1487 DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name));
1488 else
1489 DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
1490 osl_static_mem_init(osh, adapter);
1491
1492 /* Set ACP coherence flag */
1493 if (OSL_ACP_WAR_ENAB() || OSL_ARCH_IS_COHERENT())
1494 osl_flag_set(osh, OSL_ACP_COHERENCE);
1495
1496 /* allocate the Linux-specific pcie structure here */
1497 if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) {
1498 DHD_ERROR(("%s: MALLOC of dhdpcie_info_t failed\n", __FUNCTION__));
1499 break;
1500 }
1501 bzero(dhdpcie_info, sizeof(dhdpcie_info_t));
1502 dhdpcie_info->osh = osh;
1503 dhdpcie_info->dev = pdev;
1504
1505#ifdef BCMPCIE_OOB_HOST_WAKE
1506 /* allocate the OS-specific structure */
1507 dhdpcie_osinfo = MALLOC(osh, sizeof(dhdpcie_os_info_t));
1508 if (dhdpcie_osinfo == NULL) {
1509 DHD_ERROR(("%s: MALLOC of dhdpcie_os_info_t failed\n",
1510 __FUNCTION__));
1511 break;
1512 }
1513 bzero(dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
1514 dhdpcie_info->os_cxt = (void *)dhdpcie_osinfo;
1515
1516 /* Initialize host wake IRQ */
1517 spin_lock_init(&dhdpcie_osinfo->oob_irq_spinlock);
1518 /* Get customer-specific host wake IRQ parameters: IRQ number and IRQ type */
1519 dhdpcie_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter,
1520 &dhdpcie_osinfo->oob_irq_flags);
1521 if (dhdpcie_osinfo->oob_irq_num < 0) {
1522 DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__));
1523 }
1524#endif /* BCMPCIE_OOB_HOST_WAKE */
1525
1526#ifdef USE_SMMU_ARCH_MSM
1527 /* allocate private structure for using SMMU */
1528 dhdpcie_smmu_info = MALLOC(osh, sizeof(dhdpcie_smmu_info_t));
1529 if (dhdpcie_smmu_info == NULL) {
1530 DHD_ERROR(("%s: MALLOC of dhdpcie_smmu_info_t failed\n",
1531 __FUNCTION__));
1532 break;
1533 }
1534 bzero(dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
1535 dhdpcie_info->smmu_cxt = (void *)dhdpcie_smmu_info;
1536
1537 /* Initialize smmu structure */
1538 if (dhdpcie_smmu_init(pdev, dhdpcie_info->smmu_cxt) < 0) {
1539 DHD_ERROR(("%s: Failed to initialize SMMU\n",
1540 __FUNCTION__));
1541 break;
1542 }
1543#endif /* USE_SMMU_ARCH_MSM */
1544
1545#ifdef DHD_WAKE_STATUS
1546 /* Initialize pcie_lock */
1547 spin_lock_init(&dhdpcie_info->pcie_lock);
1548#endif /* DHD_WAKE_STATUS */
1549
1550 /* Find the PCI resources, verify the */
1551 /* vendor and device ID, map BAR regions and irq, update in structures */
1552 if (dhdpcie_scan_resource(dhdpcie_info)) {
1553 DHD_ERROR(("%s: dhdpcie_scan_resource() failed\n", __FUNCTION__));
1554
1555 break;
1556 }
1557
1558 /* Bus initialization */
1559 ret = dhdpcie_bus_attach(osh, &bus, dhdpcie_info->regs, dhdpcie_info->tcm, pdev);
1560 if (ret != BCME_OK) {
1561 DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
1562 break;
1563 }
1564
1565 dhdpcie_info->bus = bus;
1566 bus->is_linkdown = 0;
1567 bus->no_bus_init = FALSE;
1568
1569 /* Get RC Device Handle */
1570 bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL);
1571
1572#ifdef DONGLE_ENABLE_ISOLATION
1573 bus->dhd->dongle_isolation = TRUE;
1574#endif /* DONGLE_ENABLE_ISOLATION */
1575#ifdef SUPPORT_LINKDOWN_RECOVERY
1576#ifdef CONFIG_ARCH_MSM
1577 bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
1578 bus->pcie_event.user = pdev;
1579 bus->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
1580 bus->pcie_event.callback = dhdpcie_linkdown_cb;
1581 bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY;
1582 msm_pcie_register_event(&bus->pcie_event);
1583 bus->no_cfg_restore = FALSE;
1584#endif /* CONFIG_ARCH_MSM */
1585#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1586#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
1587 defined(CONFIG_SOC_EXYNOS9810)
1588 bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN;
1589 bus->pcie_event.user = pdev;
1590 bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK;
1591 bus->pcie_event.callback = dhdpcie_linkdown_cb;
1592 exynos_pcie_register_event(&bus->pcie_event);
1593#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 ||
1594 * CONFIG_SOC_EXYNOS9810
1595 */
1596#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1597 bus->read_shm_fail = FALSE;
1598#endif /* SUPPORT_LINKDOWN_RECOVERY */
1599
1600 if (bus->intr) {
1601 /* Register interrupt callback, but mask it (not operational yet). */
1602 DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
1603 dhdpcie_bus_intr_disable(bus);
1604
1605 if (dhdpcie_request_irq(dhdpcie_info)) {
1606 DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
1607 break;
1608 }
1609 } else {
1610 bus->pollrate = 1;
1611 DHD_INFO(("%s: PCIe interrupt function is NOT registered "
1612 "due to polling mode\n", __FUNCTION__));
1613 }
1614
1615#if defined(BCM_REQUEST_FW)
1616 if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) {
1617 DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__));
1618 }
1619 bus->nv_path = NULL;
1620 bus->fw_path = NULL;
1621#endif /* BCM_REQUEST_FW */
1622
1623 /* set private data for pci_dev */
1624 pci_set_drvdata(pdev, dhdpcie_info);
1625
1626 if (dhd_download_fw_on_driverload) {
1627 if (dhd_bus_start(bus->dhd)) {
1628 DHD_ERROR(("%s: dhd_bus_start() failed\n", __FUNCTION__));
1629 if (!allow_delay_fwdl)
1630 break;
1631 }
1632 } else {
1633 /* Set a random MAC address at boot time */
1634 get_random_bytes(&bus->dhd->mac.octet[3], 3);
1635 /* Adding BRCM OUI */
1636 bus->dhd->mac.octet[0] = 0;
1637 bus->dhd->mac.octet[1] = 0x90;
1638 bus->dhd->mac.octet[2] = 0x4C;
1639 }
1640
1641 /* Attach to the OS network interface */
1642 DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
1643 if (dhd_attach_net(bus->dhd, TRUE)) {
1644 DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__));
1645 break;
1646 }
1647
1648 dhdpcie_init_succeeded = TRUE;
1649
1650#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1651 pm_runtime_set_autosuspend_delay(&pdev->dev, AUTO_SUSPEND_TIMEOUT);
1652 pm_runtime_use_autosuspend(&pdev->dev);
1653 atomic_set(&bus->dhd->block_bus, FALSE);
1654#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1655
1656 DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__));
1657 return 0; /* return SUCCESS */
1658
1659 } while (0);
1660 /* on error, unwind the initialization in reverse order */
1661
1662 if (bus)
1663 dhdpcie_bus_release(bus);
1664
1665#ifdef BCMPCIE_OOB_HOST_WAKE
1666 if (dhdpcie_osinfo) {
1667 MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
1668 }
1669#endif /* BCMPCIE_OOB_HOST_WAKE */
1670
1671#ifdef USE_SMMU_ARCH_MSM
1672 if (dhdpcie_smmu_info) {
1673 MFREE(osh, dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
1674 dhdpcie_info->smmu_cxt = NULL;
1675 }
1676#endif /* USE_SMMU_ARCH_MSM */
1677
1678 if (dhdpcie_info)
1679 dhdpcie_detach(dhdpcie_info);
1680 pci_disable_device(pdev);
1681 if (osh)
1682 osl_detach(osh);
1683
1684 dhdpcie_init_succeeded = FALSE;
1685
1686 DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
1687
1688 return -1; /* return FAILURE */
1689}
1690
1691/* Free Linux irq */
1692void
1693dhdpcie_free_irq(dhd_bus_t *bus)
1694{
1695 struct pci_dev *pdev = NULL;
1696
1697 DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
1698 if (bus) {
1699 pdev = bus->dev;
1700 if (bus->irq_registered) {
1701 free_irq(pdev->irq, bus);
1702 bus->irq_registered = FALSE;
1703 if (bus->d2h_intr_method == PCIE_MSI) {
1704 dhdpcie_disable_msi(pdev);
1705 }
1706 } else {
1707 DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__));
1708 }
1709 }
1710 DHD_TRACE(("%s: Exit\n", __FUNCTION__));
1711 return;
1712}
1713
1714/*
1715
1716Name: dhdpcie_isr
1717
1718Parameters:
1719
17201: IN int irq -- interrupt vector
17212: IN void *arg -- handle to private data structure
1722
1723Return value:
1724
1725irqreturn_t - IRQ_HANDLED
1726
1727Description:
1728The interrupt service routine checks the status register,
1729disables the interrupt, and queues the DPC if mailbox interrupts are raised.
1730*/
1731
1732irqreturn_t
1733dhdpcie_isr(int irq, void *arg)
1734{
1735 dhd_bus_t *bus = (dhd_bus_t*)arg;
1736 bus->isr_entry_time = OSL_LOCALTIME_NS();
1737 if (!dhdpcie_bus_isr(bus)) {
1738 DHD_LOG_MEM(("%s: dhdpcie_bus_isr returns with FALSE\n", __FUNCTION__));
1739 }
1740 bus->isr_exit_time = OSL_LOCALTIME_NS();
1741 return IRQ_HANDLED;
1742}
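/*
 * Sketch (behavioural variant, illustration only): the IRQ is requested
 * with IRQF_SHARED in dhdpcie_request_irq(), and a shared handler is
 * expected to return IRQ_NONE when its device did not raise the line so
 * that the core can detect spurious-interrupt storms. A stricter form of
 * the handler above would be:
 */
static irqreturn_t
dhdpcie_isr_strict_sketch(int irq, void *arg)
{
	dhd_bus_t *bus = (dhd_bus_t *)arg;

	return dhdpcie_bus_isr(bus) ? IRQ_HANDLED : IRQ_NONE;
}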
1743
1744int
1745dhdpcie_disable_irq_nosync(dhd_bus_t *bus)
1746{
1747 struct pci_dev *dev;
1748 if ((bus == NULL) || (bus->dev == NULL)) {
1749 DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
1750 return BCME_ERROR;
1751 }
1752
1753 dev = bus->dev;
1754 disable_irq_nosync(dev->irq);
1755 return BCME_OK;
1756}
1757
1758int
1759dhdpcie_disable_irq(dhd_bus_t *bus)
1760{
1761 struct pci_dev *dev;
1762 if ((bus == NULL) || (bus->dev == NULL)) {
1763 DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
1764 return BCME_ERROR;
1765 }
1766
1767 dev = bus->dev;
1768 disable_irq(dev->irq);
1769 return BCME_OK;
1770}
1771
1772int
1773dhdpcie_enable_irq(dhd_bus_t *bus)
1774{
1775 struct pci_dev *dev;
1776 if ((bus == NULL) || (bus->dev == NULL)) {
1777 DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
1778 return BCME_ERROR;
1779 }
1780
1781 dev = bus->dev;
1782 enable_irq(dev->irq);
1783 return BCME_OK;
1784}
1785
1786int
1787dhdpcie_irq_disabled(dhd_bus_t *bus)
1788{
1789#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
1790 struct irq_desc *desc = irq_to_desc(bus->dev->irq);
1791 /* depth will be zero if the IRQ is enabled */
1792 return desc->depth;
1793#else
1794 /* return ERROR by default as there is no support for lower versions */
1795 return BCME_ERROR;
1796#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) */
1797}
1798
1799int
1800dhdpcie_start_host_pcieclock(dhd_bus_t *bus)
1801{
1802 int ret = 0;
1803#ifdef CONFIG_ARCH_MSM
1804#ifdef SUPPORT_LINKDOWN_RECOVERY
1805 int options = 0;
1806#endif /* SUPPORT_LINKDOWN_RECOVERY */
1807#endif /* CONFIG_ARCH_MSM */
1808 DHD_TRACE(("%s Enter:\n", __FUNCTION__));
1809
1810 if (bus == NULL) {
1811 return BCME_ERROR;
1812 }
1813
1814 if (bus->dev == NULL) {
1815 return BCME_ERROR;
1816 }
1817
1818#ifdef CONFIG_ARCH_MSM
1819#ifdef SUPPORT_LINKDOWN_RECOVERY
1820 if (bus->no_cfg_restore) {
1821 options = MSM_PCIE_CONFIG_NO_CFG_RESTORE;
1822 }
1823 ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
1824 bus->dev, NULL, options);
1825 if (bus->no_cfg_restore && !ret) {
1826 msm_pcie_recover_config(bus->dev);
1827 bus->no_cfg_restore = 0;
1828 }
1829#else
1830 ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
1831 bus->dev, NULL, 0);
1832#endif /* SUPPORT_LINKDOWN_RECOVERY */
1833 if (ret) {
1834 DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__));
1835 goto done;
1836 }
1837
1838done:
1839#endif /* CONFIG_ARCH_MSM */
1840 DHD_TRACE(("%s Exit:\n", __FUNCTION__));
1841 return ret;
1842}
1843
1844int
1845dhdpcie_stop_host_pcieclock(dhd_bus_t *bus)
1846{
1847 int ret = 0;
1848#ifdef CONFIG_ARCH_MSM
1849#ifdef SUPPORT_LINKDOWN_RECOVERY
1850 int options = 0;
1851#endif /* SUPPORT_LINKDOWN_RECOVERY */
1852#endif /* CONFIG_ARCH_MSM */
1853
1854 DHD_TRACE(("%s Enter:\n", __FUNCTION__));
1855
1856 if (bus == NULL) {
1857 return BCME_ERROR;
1858 }
1859
1860 if (bus->dev == NULL) {
1861 return BCME_ERROR;
1862 }
1863
1864#ifdef CONFIG_ARCH_MSM
1865#ifdef SUPPORT_LINKDOWN_RECOVERY
1866 if (bus->no_cfg_restore) {
1867 options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN;
1868 }
1869
1870 ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
1871 bus->dev, NULL, options);
1872#else
1873 ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
1874 bus->dev, NULL, 0);
1875#endif /* SUPPORT_LINKDOWN_RECOVERY */
1876 if (ret) {
1877 DHD_ERROR(("Failed to stop PCIe link\n"));
1878 goto done;
1879 }
1880done:
1881#endif /* CONFIG_ARCH_MSM */
1882 DHD_TRACE(("%s Exit:\n", __FUNCTION__));
1883 return ret;
1884}
1885
1886int
1887dhdpcie_disable_device(dhd_bus_t *bus)
1888{
1889 DHD_TRACE(("%s Enter:\n", __FUNCTION__));
1890
1891 if (bus == NULL) {
1892 return BCME_ERROR;
1893 }
1894
1895 if (bus->dev == NULL) {
1896 return BCME_ERROR;
1897 }
1898
1899#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
1900 if (pci_is_enabled(bus->dev))
1901#endif // endif
1902 pci_disable_device(bus->dev);
1903
1904 return 0;
1905}
1906
1907int
1908dhdpcie_enable_device(dhd_bus_t *bus)
1909{
1910 int ret = BCME_ERROR;
1911#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1912 dhdpcie_info_t *pch;
1913#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1914
1915 DHD_TRACE(("%s Enter:\n", __FUNCTION__));
1916
1917 if (bus == NULL) {
1918 return BCME_ERROR;
1919 }
1920
1921 if (bus->dev == NULL) {
1922 return BCME_ERROR;
1923 }
1924
1925#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1926 pch = pci_get_drvdata(bus->dev);
1927 if (pch == NULL) {
1928 return BCME_ERROR;
1929 }
1930
1931#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && (LINUX_VERSION_CODE < \
1932 KERNEL_VERSION(3, 19, 0)) && !defined(CONFIG_SOC_EXYNOS8890)
1933 /* Use pci_load_and_free_saved_state() for compatibility
1934 * with kernel versions 3.14.0 to 3.18.41.
1935 */
1936 pci_load_and_free_saved_state(bus->dev, &pch->default_state);
1937 pch->default_state = pci_store_saved_state(bus->dev);
1938#else
1939 pci_load_saved_state(bus->dev, pch->default_state);
1940#endif /* LINUX_VERSION >= 3.14.0 && LINUX_VERSION < 3.19.0 && !CONFIG_SOC_EXYNOS8890 */
1941
1942 /* Check if Device ID is valid */
1943 if (bus->dev->state_saved) {
1944 uint32 vid, saved_vid;
1945 pci_read_config_dword(bus->dev, PCI_CFG_VID, &vid);
1946 saved_vid = bus->dev->saved_config_space[PCI_CFG_VID];
1947 if (vid != saved_vid) {
1948 DHD_ERROR(("%s: VID(0x%x) is different from saved VID(0x%x) "
1949 "Skip the bus init\n", __FUNCTION__, vid, saved_vid));
1950 bus->no_bus_init = TRUE;
1951 /* Check if the PCIe link is down */
1952 if (vid == (uint32)-1) {
1953 bus->is_linkdown = 1;
1954#ifdef SUPPORT_LINKDOWN_RECOVERY
1955#ifdef CONFIG_ARCH_MSM
1956 bus->no_cfg_restore = TRUE;
1957#endif /* CONFIG_ARCH_MSM */
1958#endif /* SUPPORT_LINKDOWN_RECOVERY */
1959 }
1960 return BCME_ERROR;
1961 }
1962 }
1963
1964 pci_restore_state(bus->dev);
1965#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */
1966
1967 ret = pci_enable_device(bus->dev);
1968 if (ret) {
1969 pci_disable_device(bus->dev);
1970 } else {
1971 pci_set_master(bus->dev);
1972 }
1973
1974 return ret;
1975}
1976
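/* Map BAR0 (dongle registers) and BAR1 (dongle TCM) into the kernel's
 * virtual address space and publish the mappings via bus->regs and bus->tcm.
 */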
int
dhdpcie_alloc_resource(dhd_bus_t *bus)
{
	dhdpcie_info_t *dhdpcie_info;
	phys_addr_t bar0_addr, bar1_addr;
	ulong bar1_size;

	do {
		if (bus == NULL) {
			DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
			break;
		}

		if (bus->dev == NULL) {
			DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
			break;
		}

		dhdpcie_info = pci_get_drvdata(bus->dev);
		if (dhdpcie_info == NULL) {
			DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
			break;
		}

		bar0_addr = pci_resource_start(bus->dev, 0);	/* Bar-0 mapped address */
		bar1_addr = pci_resource_start(bus->dev, 2);	/* Bar-1 mapped address */

		/* read Bar-1 mapped memory range */
		bar1_size = pci_resource_len(bus->dev, 2);

		if ((bar1_size == 0) || (bar1_addr == 0)) {
			printf("%s: BAR1 not enabled for this device: size(%ld),"
				" addr(0x"PRINTF_RESOURCE")\n",
				__FUNCTION__, bar1_size, bar1_addr);
			break;
		}

		dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
		if (!dhdpcie_info->regs) {
			DHD_ERROR(("%s: ioremap() for regs failed\n", __FUNCTION__));
			break;
		}

		bus->regs = dhdpcie_info->regs;
		dhdpcie_info->tcm_size =
			(bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
		dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
		if (!dhdpcie_info->tcm) {
			DHD_ERROR(("%s: ioremap() for tcm failed\n", __FUNCTION__));
			REG_UNMAP(dhdpcie_info->regs);
			bus->regs = NULL;
			break;
		}

		bus->tcm = dhdpcie_info->tcm;

		DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
			__FUNCTION__, dhdpcie_info->regs, bar0_addr));
		DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
			__FUNCTION__, dhdpcie_info->tcm, bar1_addr));

		return 0;
	} while (0);

	return BCME_ERROR;
}

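/* Unmap the BAR0/BAR1 regions mapped by dhdpcie_alloc_resource(). */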
void
dhdpcie_free_resource(dhd_bus_t *bus)
{
	dhdpcie_info_t *dhdpcie_info;

	if (bus == NULL) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return;
	}

	if (bus->dev == NULL) {
		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
		return;
	}

	dhdpcie_info = pci_get_drvdata(bus->dev);
	if (dhdpcie_info == NULL) {
		DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
		return;
	}

	if (bus->regs) {
		REG_UNMAP(dhdpcie_info->regs);
		bus->regs = NULL;
	}

	if (bus->tcm) {
		REG_UNMAP(dhdpcie_info->tcm);
		bus->tcm = NULL;
	}
}

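/* Register the PCIe IRQ handler with interrupts masked; they are enabled
 * later once the bus is operational.
 */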
int
dhdpcie_bus_request_irq(struct dhd_bus *bus)
{
	dhdpcie_info_t *dhdpcie_info;
	int ret = 0;

	if (bus == NULL) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (bus->dev == NULL) {
		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	dhdpcie_info = pci_get_drvdata(bus->dev);
	if (dhdpcie_info == NULL) {
		DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (bus->intr) {
		/* Register interrupt callback, but mask it (not operational yet). */
		DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
		dhdpcie_bus_intr_disable(bus);
		ret = dhdpcie_request_irq(dhdpcie_info);
		if (ret) {
			DHD_ERROR(("%s: request_irq() failed, ret=%d\n",
				__FUNCTION__, ret));
			return ret;
		}
	}

	return ret;
}

#ifdef BCMPCIE_OOB_HOST_WAKE
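/* Return the out-of-band host-wake IRQ number, or 0 if unavailable. */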
int dhdpcie_get_oob_irq_num(dhd_bus_t *bus)
{
	dhdpcie_info_t *pch;
	dhdpcie_os_info_t *dhdpcie_osinfo;

	if (bus == NULL) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return 0;
	}

	if (bus->dev == NULL) {
		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
		return 0;
	}

	pch = pci_get_drvdata(bus->dev);
	if (pch == NULL) {
		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
		return 0;
	}

	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;

	return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_num : 0;
}

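/* Enable or disable the OOB host-wake interrupt under the OOB IRQ spinlock,
 * tracking enable/disable counts for debugging.
 */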
void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable)
{
	unsigned long flags;
	dhdpcie_info_t *pch;
	dhdpcie_os_info_t *dhdpcie_osinfo;

	if (bus == NULL) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return;
	}

	if (bus->dev == NULL) {
		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
		return;
	}

	pch = pci_get_drvdata(bus->dev);
	if (pch == NULL) {
		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
		return;
	}

	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
	spin_lock_irqsave(&dhdpcie_osinfo->oob_irq_spinlock, flags);
	if ((dhdpcie_osinfo->oob_irq_enabled != enable) &&
		(dhdpcie_osinfo->oob_irq_num > 0)) {
		if (enable) {
			enable_irq(dhdpcie_osinfo->oob_irq_num);
			bus->oob_intr_enable_count++;
		} else {
			disable_irq_nosync(dhdpcie_osinfo->oob_irq_num);
			bus->oob_intr_disable_count++;
		}
		dhdpcie_osinfo->oob_irq_enabled = enable;
	}
	spin_unlock_irqrestore(&dhdpcie_osinfo->oob_irq_spinlock, flags);
}

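/* OOB host-wake interrupt handler: masks the OOB IRQ, accounts the wake
 * event, and kicks the runtime-PM machinery to resume the bus.
 */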
static irqreturn_t wlan_oob_irq(int irq, void *data)
{
	dhd_bus_t *bus;
	unsigned long flags_bus;
	DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__));
	bus = (dhd_bus_t *)data;
	dhdpcie_oob_intr_set(bus, FALSE);
	bus->last_oob_irq_time = OSL_LOCALTIME_NS();
	bus->oob_intr_count++;
#ifdef DHD_WAKE_STATUS
#ifdef DHD_PCIE_RUNTIMEPM
	/* Avoid counting wakeups that are caused by Runtime PM */
	if (bus->chk_pm)
#endif /* DHD_PCIE_RUNTIMEPM */
	{
		bcmpcie_set_get_wake(bus, 1);
	}
#endif /* DHD_WAKE_STATUS */
#ifdef DHD_PCIE_RUNTIMEPM
	dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq);
#endif /* DHD_PCIE_RUNTIMEPM */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	dhd_bus_wakeup_work(bus->dhd);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
	/* Hold a wakelock if bus_low_power_state is
	 * DHD_BUS_D3_INFORM_SENT or DHD_BUS_D3_ACK_RECIEVED
	 */
	if (bus->dhd->up && bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
		DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT);
	}
	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
	return IRQ_HANDLED;
}

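/* Request the OOB host-wake IRQ and mark it as a wakeup source. */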
int dhdpcie_oob_intr_register(dhd_bus_t *bus)
{
	int err = 0;
	dhdpcie_info_t *pch;
	dhdpcie_os_info_t *dhdpcie_osinfo;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
	if (bus == NULL) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return -EINVAL;
	}

	if (bus->dev == NULL) {
		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
		return -EINVAL;
	}

	pch = pci_get_drvdata(bus->dev);
	if (pch == NULL) {
		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
		return -EINVAL;
	}

	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
	if (dhdpcie_osinfo->oob_irq_registered) {
		DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__));
		return -EBUSY;
	}

	if (dhdpcie_osinfo->oob_irq_num > 0) {
		DHD_INFO_HW4(("%s OOB irq=%d flags=%X \n", __FUNCTION__,
			(int)dhdpcie_osinfo->oob_irq_num,
			(int)dhdpcie_osinfo->oob_irq_flags));
		err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq,
			dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake",
			bus);
		if (err) {
			DHD_ERROR(("%s: request_irq failed with %d\n",
				__FUNCTION__, err));
			return err;
		}
		err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num);
		if (!err) {
			dhdpcie_osinfo->oob_irq_wake_enabled = TRUE;
		} else {
			/* On the Hikey platform enable_irq_wake() fails with ENXIO
			 * (No such device or address) because the irq_set_wake()
			 * callback is not registered in the kernel; treat the
			 * failure as non-fatal and return BCME_OK.
			 */
			err = BCME_OK;
		}
		dhdpcie_osinfo->oob_irq_enabled = TRUE;
	}

	dhdpcie_osinfo->oob_irq_registered = TRUE;

	return err;
}

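/* Undo dhdpcie_oob_intr_register(): disable wake capability, disable the
 * IRQ, and free it.
 */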
void dhdpcie_oob_intr_unregister(dhd_bus_t *bus)
{
	int err = 0;
	dhdpcie_info_t *pch;
	dhdpcie_os_info_t *dhdpcie_osinfo;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
	if (bus == NULL) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return;
	}

	if (bus->dev == NULL) {
		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
		return;
	}

	pch = pci_get_drvdata(bus->dev);
	if (pch == NULL) {
		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
		return;
	}

	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
	if (!dhdpcie_osinfo->oob_irq_registered) {
		DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__));
		return;
	}
	if (dhdpcie_osinfo->oob_irq_num > 0) {
		if (dhdpcie_osinfo->oob_irq_wake_enabled) {
			err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num);
			if (!err) {
				dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
			}
		}
		if (dhdpcie_osinfo->oob_irq_enabled) {
			disable_irq(dhdpcie_osinfo->oob_irq_num);
			dhdpcie_osinfo->oob_irq_enabled = FALSE;
		}
		free_irq(dhdpcie_osinfo->oob_irq_num, bus);
	}
	dhdpcie_osinfo->oob_irq_registered = FALSE;
}
#endif /* BCMPCIE_OOB_HOST_WAKE */

#ifdef DHD_FW_COREDUMP
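/* Exported hook that lets an external (platform/AP) caller trigger a dongle
 * memory dump, e.g. on abnormal access.
 */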
int
dhd_dongle_mem_dump(void)
{
	if (!g_dhd_bus) {
		DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
		return -ENODEV;
	}

	dhd_bus_dump_console_buffer(g_dhd_bus);
	dhd_prot_debug_info_print(g_dhd_bus->dhd);

	g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
	g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;

#ifdef DHD_PCIE_RUNTIMEPM
	dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0));
#endif /* DHD_PCIE_RUNTIMEPM */

	dhd_bus_mem_dump(g_dhd_bus->dhd);
	return 0;
}
EXPORT_SYMBOL(dhd_dongle_mem_dump);
#endif /* DHD_FW_COREDUMP */

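/* Exported helper: report whether the DHD driver is up on the global bus. */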
bool
dhd_bus_check_driver_up(void)
{
	dhd_bus_t *bus;
	dhd_pub_t *dhdp;
	bool isup = FALSE;

	bus = (dhd_bus_t *)g_dhd_bus;
	if (!bus) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return isup;
	}

	dhdp = bus->dhd;
	if (dhdp) {
		isup = dhdp->up;
	}

	return isup;
}
EXPORT_SYMBOL(dhd_bus_check_driver_up);

#ifdef DHD_PCIE_RUNTIMEPM
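/* Runtime-PM idle tick: counts idle intervals and, once bus->idletime
 * expires with the bus idle, suspends the bus, waits for a wakeup via
 * bus->rpm_queue, then resumes it. Returns TRUE if a suspend/resume cycle
 * completed, FALSE otherwise.
 */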
bool dhd_runtimepm_state(dhd_pub_t *dhd)
{
	dhd_bus_t *bus;
	unsigned long flags;
	bus = dhd->bus;

	DHD_GENERAL_LOCK(dhd, flags);
	bus->idlecount++;

	DHD_TRACE(("%s : Enter \n", __FUNCTION__));
	if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) {
		bus->idlecount = 0;
		if (DHD_BUS_BUSY_CHECK_IDLE(dhd) && !DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd)) {
			bus->bus_wake = 0;
			DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhd);
			bus->runtime_resume_done = FALSE;
			/* stop all interface network queues */
			dhd_bus_stop_queue(bus);
			DHD_GENERAL_UNLOCK(dhd, flags);
			DHD_ERROR(("%s: DHD Idle state!! - idletime: %d, wdtick: %d \n",
				__FUNCTION__, bus->idletime, dhd_runtimepm_ms));
			/* If RPM suspend fails, return FALSE so that it is retried */
			if (dhdpcie_set_suspend_resume(bus, TRUE)) {
				DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__));
				DHD_GENERAL_LOCK(dhd, flags);
				DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd);
				dhd_os_busbusy_wake(bus->dhd);
				bus->runtime_resume_done = TRUE;
				/* Without this, the NET TX queue can get stuck */
				dhd_bus_start_queue(bus);
				DHD_GENERAL_UNLOCK(dhd, flags);
				smp_wmb();
				wake_up_interruptible(&bus->rpm_queue);
				return FALSE;
			}

			DHD_GENERAL_LOCK(dhd, flags);
			DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd);
			DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhd);
			/* Make sure the NET TX queue is active */
			dhd_bus_start_queue(bus);
			DHD_GENERAL_UNLOCK(dhd, flags);

			wait_event_interruptible(bus->rpm_queue, bus->bus_wake);

			DHD_GENERAL_LOCK(dhd, flags);
			DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhd);
			DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhd);
			DHD_GENERAL_UNLOCK(dhd, flags);

			dhdpcie_set_suspend_resume(bus, FALSE);

			DHD_GENERAL_LOCK(dhd, flags);
			DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhd);
			dhd_os_busbusy_wake(bus->dhd);
			/* Inform the wake-up context that resume is over */
			bus->runtime_resume_done = TRUE;
			/* Make sure the NET TX queue is active */
			dhd_bus_start_queue(bus);
			DHD_GENERAL_UNLOCK(dhd, flags);

			smp_wmb();
			wake_up_interruptible(&bus->rpm_queue);
			DHD_ERROR(("%s : runtime resume ended \n", __FUNCTION__));
			return TRUE;
		} else {
			DHD_GENERAL_UNLOCK(dhd, flags);
			/* One of the contexts (TX, IOVAR or RX) is busy,
			 * so we should not suspend
			 */
			DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n",
				__FUNCTION__, dhd->dhd_bus_busy_state));
			return FALSE;
		}
	}

	DHD_GENERAL_UNLOCK(dhd, flags);
	return FALSE;
} /* dhd_runtimepm_state */

/*
 * dhd_runtime_bus_wake
 * TRUE - related to the runtime PM context
 * FALSE - not involved in the runtime PM context
 */
bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr)
{
	unsigned long flags;
	bus->idlecount = 0;
	DHD_TRACE(("%s : enter\n", __FUNCTION__));
	if (bus->dhd->up == FALSE) {
		DHD_INFO(("%s : dhd is not up\n", __FUNCTION__));
		return FALSE;
	}

	DHD_GENERAL_LOCK(bus->dhd, flags);
	if (DHD_BUS_BUSY_CHECK_RPM_ALL(bus->dhd)) {
		/* Wake up the RPM state thread if suspend is in progress
		 * or already done
		 */
		if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus->dhd) ||
			DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
			bus->bus_wake = 1;

			DHD_GENERAL_UNLOCK(bus->dhd, flags);

			DHD_ERROR(("Runtime Resume is called in %pf\n", func_addr));
			smp_wmb();
			wake_up_interruptible(&bus->rpm_queue);
		/* No need to wake up the RPM state thread */
		} else if (DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus->dhd)) {
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
		}

		/* If wait is TRUE, block here until the resume is done */
		if (wait) {
			wait_event_interruptible(bus->rpm_queue, bus->runtime_resume_done);
		} else {
			DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__));
		}
		/* If it is called from the RPM context, it returns TRUE */
		return TRUE;
	}

	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	return FALSE;
}

bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void* func_addr)
{
	dhd_bus_t *bus = dhdp->bus;
	return dhd_runtime_bus_wake(bus, wait, func_addr);
}

void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp)
{
	dhd_bus_t *bus = dhdp->bus;
	bus->idletime = 0;
}

bool dhdpcie_is_resume_done(dhd_pub_t *dhdp)
{
	dhd_bus_t *bus = dhdp->bus;
	return bus->runtime_resume_done;
}
#endif /* DHD_PCIE_RUNTIMEPM */

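/* Return the generic struct device for the bus's PCI device, or NULL. */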
struct device * dhd_bus_to_dev(dhd_bus_t *bus)
{
	struct pci_dev *pdev;
	pdev = bus->dev;

	if (pdev)
		return &pdev->dev;
	else
		return NULL;
}