/* Imported from commit 1b4a7c03 */
1 | /* |
2 | * Linux DHD Bus Module for PCIE | |
3 | * | |
4 | * Copyright (C) 2020, Broadcom. | |
5 | * | |
6 | * Unless you and Broadcom execute a separate written software license | |
7 | * agreement governing use of this software, this software is licensed to you | |
8 | * under the terms of the GNU General Public License version 2 (the "GPL"), | |
9 | * available at http://www.broadcom.com/licenses/GPLv2.php, with the | |
10 | * following added to such license: | |
11 | * | |
12 | * As a special exception, the copyright holders of this software give you | |
13 | * permission to link this software with independent modules, and to copy and | |
14 | * distribute the resulting executable under terms of your choice, provided that | |
15 | * you also meet, for each linked independent module, the terms and conditions of | |
16 | * the license of that module. An independent module is a module which is not | |
17 | * derived from this software. The special exception does not apply to any | |
18 | * modifications of the software. | |
19 | * | |
20 | * | |
21 | * <<Broadcom-WL-IPTag/Open:>> | |
22 | * | |
23 | * $Id$ | |
24 | */ | |
25 | ||
26 | #ifndef dhd_pcie_h | |
27 | #define dhd_pcie_h | |
28 | ||
29 | #include <bcmpcie.h> | |
30 | #include <hnd_cons.h> | |
31 | #ifdef SUPPORT_LINKDOWN_RECOVERY | |
32 | #ifdef CONFIG_ARCH_MSM | |
33 | #ifdef CONFIG_PCI_MSM | |
34 | #include <linux/msm_pcie.h> | |
35 | #else | |
36 | #include <mach/msm_pcie.h> | |
37 | #endif /* CONFIG_PCI_MSM */ | |
38 | #endif /* CONFIG_ARCH_MSM */ | |
39 | #ifdef CONFIG_ARCH_EXYNOS | |
40 | #ifndef SUPPORT_EXYNOS7420 | |
41 | #include <linux/exynos-pci-noti.h> | |
42 | extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg); | |
43 | extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg); | |
44 | #endif /* !SUPPORT_EXYNOS7420 */ | |
45 | #endif /* CONFIG_ARCH_EXYNOS */ | |
46 | #endif /* SUPPORT_LINKDOWN_RECOVERY */ | |
47 | ||
48 | #ifdef DHD_PCIE_RUNTIMEPM | |
49 | #include <linux/mutex.h> | |
50 | #include <linux/wait.h> | |
51 | #endif /* DHD_PCIE_RUNTIMEPM */ | |
52 | ||
53 | /* defines */ | |
54 | #define PCIE_SHARED_VERSION PCIE_SHARED_VERSION_7 | |
55 | ||
56 | #define PCMSGBUF_HDRLEN 0 | |
57 | #define DONGLE_REG_MAP_SIZE (32 * 1024) | |
58 | #define DONGLE_TCM_MAP_SIZE (4096 * 1024) | |
59 | #define DONGLE_MIN_MEMSIZE (128 *1024) | |
60 | #ifdef DHD_DEBUG | |
61 | #define DHD_PCIE_SUCCESS 0 | |
62 | #define DHD_PCIE_FAILURE 1 | |
63 | #endif /* DHD_DEBUG */ | |
64 | #define REMAP_ENAB(bus) ((bus)->remap) | |
65 | #define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize))) | |
66 | ||
67 | #ifdef SUPPORT_LINKDOWN_RECOVERY | |
68 | #ifdef CONFIG_ARCH_MSM | |
69 | #define struct_pcie_notify struct msm_pcie_notify | |
70 | #define struct_pcie_register_event struct msm_pcie_register_event | |
71 | #endif /* CONFIG_ARCH_MSM */ | |
72 | #ifdef CONFIG_ARCH_EXYNOS | |
73 | #ifndef SUPPORT_EXYNOS7420 | |
74 | #define struct_pcie_notify struct exynos_pcie_notify | |
75 | #define struct_pcie_register_event struct exynos_pcie_register_event | |
76 | #endif /* !SUPPORT_EXYNOS7420 */ | |
77 | #endif /* CONFIG_ARCH_EXYNOS */ | |
78 | #endif /* SUPPORT_LINKDOWN_RECOVERY */ | |
79 | ||
80 | #define MAX_DHD_TX_FLOWS 320 | |
81 | ||
82 | /* user defined data structures */ | |
83 | /* Device console log buffer state */ | |
84 | #define CONSOLE_LINE_MAX 192u | |
85 | #define CONSOLE_BUFFER_MAX (8 * 1024) | |
86 | ||
87 | #ifdef IDLE_TX_FLOW_MGMT | |
88 | #define IDLE_FLOW_LIST_TIMEOUT 5000 | |
89 | #define IDLE_FLOW_RING_TIMEOUT 5000 | |
90 | #endif /* IDLE_TX_FLOW_MGMT */ | |
91 | ||
92 | #define HWA_RING_INDEX_SHIFT(val) ((uint32)(val) << 16u) | |
93 | ||
94 | /* HWA enabled and inited */ | |
95 | #define HWA_CAPAB(dhd) ((dhd)->hwa_capable) | |
96 | #define HWA_ACTIVE(dhd) ((dhd)->hwa_inited) | |
97 | #define RING_HWA_DB_IDX(dhd, ringid) ((dhd)->bus->ring_sh[ringid].ring_hwa_db_idx) | |
98 | ||
99 | /* implicit DMA for h2d wr and d2h rd indice from Host memory to TCM */ | |
100 | #define IDMA_ENAB(dhd) ((dhd)->idma_enable) | |
101 | #define IDMA_ACTIVE(dhd) (((dhd)->idma_enable) && ((dhd)->idma_inited)) | |
102 | ||
103 | #define IDMA_CAPABLE(bus) (((bus)->sih->buscorerev == 19) || ((bus)->sih->buscorerev >= 23)) | |
104 | ||
105 | /* IFRM (Implicit Flow Ring Manager enable and inited */ | |
106 | #define IFRM_ENAB(dhd) ((dhd)->ifrm_enable) | |
107 | #define IFRM_ACTIVE(dhd) (((dhd)->ifrm_enable) && ((dhd)->ifrm_inited)) | |
108 | ||
109 | /* DAR registers use for h2d doorbell */ | |
110 | #define DAR_ENAB(dhd) ((dhd)->dar_enable) | |
111 | #define DAR_ACTIVE(dhd) (((dhd)->dar_enable) && ((dhd)->dar_inited)) | |
112 | ||
113 | /* DAR WAR for revs < 64 */ | |
114 | #define DAR_PWRREQ(bus) (((bus)->_dar_war) && DAR_ACTIVE((bus)->dhd)) | |
115 | ||
116 | /* PCIE CTO Prevention and Recovery */ | |
117 | #define PCIECTO_ENAB(bus) ((bus)->cto_enable) | |
118 | ||
119 | /* Implicit DMA index usage : | |
120 | * Index 0 for h2d write index transfer | |
121 | * Index 1 for d2h read index transfer | |
122 | */ | |
123 | #define IDMA_IDX0 0 | |
124 | #define IDMA_IDX1 1 | |
125 | #define IDMA_IDX2 2 | |
126 | #define IDMA_IDX3 3 | |
127 | #define DMA_TYPE_SHIFT 4 | |
128 | #define DMA_TYPE_IDMA 1 | |
129 | ||
130 | #define DHDPCIE_CONFIG_HDR_SIZE 16 | |
131 | #define DHDPCIE_CONFIG_CHECK_DELAY_MS 10 /* 10ms */ | |
132 | #define DHDPCIE_CONFIG_CHECK_RETRY_COUNT 20 | |
133 | #define DHDPCIE_DONGLE_PWR_TOGGLE_DELAY 1000 /* 1ms in units of us */ | |
134 | #define DHDPCIE_PM_D3_DELAY 200000 /* 200ms in units of us */ | |
135 | #define DHDPCIE_PM_D2_DELAY 200 /* 200us */ | |
136 | ||
/* Host-side state for polling the dongle's console log buffer. */
typedef struct dhd_console {
	uint		count;		/* Poll interval msec counter */
	uint		log_addr;	/* Log struct address (fixed) */
	hnd_log_t	log;		/* Log struct (host copy) */
	uint		bufsize;	/* Size of log buffer */
	uint8		*buf;		/* Log buffer (host copy) */
	uint		last;		/* Last buffer read index */
} dhd_console_t;
145 | ||
/* Per-ring shared-memory bookkeeping: dongle addresses of the ring memory
 * and its read/write state words (one entry per message ring).
 */
typedef struct ring_sh_info {
	uint32	ring_mem_addr;		/* dongle address of ring memory */
	uint32	ring_state_w;		/* dongle address of write state */
	uint32	ring_state_r;		/* dongle address of read state */
	pcie_hwa_db_index_t ring_hwa_db_idx;	/* HWA DB index value per ring */
} ring_sh_info_t;
#define MAX_DS_TRACE_SIZE	50
/* One deep-sleep (DS) protocol trace record kept in dhd_bus::ds_trace. */
typedef struct _dhd_ds_trace_t {
	uint64	timestamp;	/* when the event was recorded */
	bool	d2h;		/* TRUE for device-to-host, FALSE for host-to-device */
	uint32	dsval;		/* raw DS mailbox value for this event */
#ifdef PCIE_INB_DW
	enum dhd_bus_ds_state inbstate;	/* inband DS state at the time of the event */
#endif /* PCIE_INB_DW */
} dhd_ds_trace_t;
161 | ||
162 | #define DEVICE_WAKE_NONE 0 | |
163 | #define DEVICE_WAKE_OOB 1 | |
164 | #define DEVICE_WAKE_INB 2 | |
165 | ||
166 | #define INBAND_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_INB) | |
167 | #define OOB_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_OOB) | |
168 | #define NO_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_NONE) | |
169 | ||
170 | #define PCIE_PWR_REQ_RELOAD_WAR_ENAB(buscorerev) \ | |
171 | ((buscorerev == 66) || (buscorerev == 67) || (buscorerev == 68) || \ | |
172 | (buscorerev == 70) || (buscorerev == 72)) | |
173 | ||
174 | #define PCIE_FASTLPO_ENABLED(buscorerev) \ | |
175 | ((buscorerev == 66) || (buscorerev == 67) || (buscorerev == 68) || \ | |
176 | (buscorerev == 70) || (buscorerev == 72)) | |
177 | ||
178 | /* | |
179 | * HW JIRA - CRWLPCIEGEN2-672 | |
180 | * Producer Index Feature which is used by F1 gets reset on F0 FLR | |
181 | * fixed in REV68 | |
182 | */ | |
183 | #define PCIE_ENUM_RESET_WAR_ENAB(buscorerev) \ | |
184 | ((buscorerev == 66) || (buscorerev == 67)) | |
185 | ||
186 | struct dhd_bus; | |
187 | ||
/* Per-firmware-revision dispatch: selects the mailbox-data handler
 * matching the dongle firmware interface revision.
 */
struct dhd_pcie_rev {
	uint8	fw_rev;					/* firmware interface revision */
	void	(*handle_mb_data)(struct dhd_bus *);	/* mailbox data handler for this rev */
};
192 | ||
/* Saved copy of the device's PCIe configuration registers
 * (see dhd_bus::saved_config).
 */
typedef struct dhdpcie_config_save
{
	/* first DHDPCIE_CONFIG_HDR_SIZE dwords of config space — presumably the
	 * standard config header; confirm against the save/restore code
	 */
	uint32 header[DHDPCIE_CONFIG_HDR_SIZE];
	/* pmcsr save */
	uint32 pmcsr;
	/* express save */
	uint32 exp_dev_ctrl_stat;
	uint32 exp_link_ctrl_stat;
	uint32 exp_dev_ctrl_stat2;
	uint32 exp_link_ctrl_stat2;
	/* msi save */
	uint32 msi_cap;
	uint32 msi_addr0;
	uint32 msi_addr1;
	uint32 msi_data;
	/* l1pm save */
	uint32 l1pm0;
	uint32 l1pm1;
	/* ltr save */
	uint32 ltr;
	/* aer save (offsets are within the AER extended capability) */
	uint32 aer_caps_ctrl;		/* 0x18 */
	uint32 aer_severity;		/* 0x0C */
	uint32 aer_umask;		/* 0x08 */
	uint32 aer_cmask;		/* 0x14 */
	uint32 aer_root_cmd;		/* 0x2c */
	/* BAR0 and BAR1 windows */
	uint32 bar0_win;
	uint32 bar1_win;
} dhdpcie_config_save_t;
223 | ||
/* The level of bus communication with the dongle */
enum dhd_bus_low_power_state {
	DHD_BUS_NO_LOW_POWER_STATE,	/* Not in low power state */
	DHD_BUS_D3_INFORM_SENT,		/* D3 INFORM sent */
	/* D3 ACK received (identifier spelling kept for source compatibility) */
	DHD_BUS_D3_ACK_RECIEVED,
};
230 | ||
/** Instantiated once for each hardware (dongle) instance that this DHD manages */
typedef struct dhd_bus {
	dhd_pub_t	*dhd;		/**< pointer to per hardware (dongle) unique instance */
	struct pci_dev	*rc_dev;	/* pci RC device handle */
	struct pci_dev	*dev;		/* pci device handle */
	dll_t		flowring_active_list;	/* constructed list of tx flowring queues */
#ifdef IDLE_TX_FLOW_MGMT
	uint64		active_list_last_process_ts;
					/* stores the timestamp of active list processing */
#endif /* IDLE_TX_FLOW_MGMT */

	si_t		*sih;		/* Handle for SI calls */
	char		*vars;		/* Variables (from CIS and/or other) */
	uint		varsz;		/* Size of variables buffer */
	uint32		sbaddr;		/* Current SB window pointer (-1, invalid) */
	sbpcieregs_t	*reg;		/* Registers for PCIE core */

	uint		armrev;		/* CPU core revision */
	uint		coreid;		/* CPU core id */
	uint		ramrev;		/* SOCRAM core revision */
	uint32		ramsize;	/* Size of RAM in SOCRAM (bytes) */
	uint32		orig_ramsize;	/* Size of RAM in SOCRAM (bytes) */
	uint32		srmemsize;	/* Size of SRMEM */

	uint32		bus;		/* gSPI or SDIO bus */
	uint32		intstatus;	/* Intstatus bits (events) pending */
	bool		dpc_sched;	/* Indicates DPC schedule (intrpt rcvd) */
	bool		fcstate;	/* State of dongle flow-control */

	uint16		cl_devid;	/* cached devid for dhdsdio_probe_attach() */
	char		*fw_path;	/* module_param: path to firmware image */
	char		*nv_path;	/* module_param: path to nvram vars file */

	struct pktq	txq;		/* Queue length used for flow-control */

	bool		intr;		/* Use interrupts */
	bool		poll;		/* Use polling */
	bool		ipend;		/* Device interrupt is pending */
	bool		intdis;		/* Interrupts disabled by isr */
	uint		intrcount;	/* Count of device interrupt callbacks */
	uint		lastintrs;	/* Count as of last watchdog timer */

	dhd_console_t	console;	/* Console output polling support */
	uint		console_addr;	/* Console address from shared struct */

	bool		alp_only;	/* Don't use HT clock (ALP only) */

	bool		remap;		/* Contiguous 1MB RAM: 512K socram + 512K devram
					 * Available with socram rev 16
					 * Remap region not DMA-able
					 */
	uint32		resetinstr;	/* reset vector instruction (written on download) */
	uint32		dongle_ram_base;	/* dongle RAM base address */
	uint32		next_tlv;	/* Holds location of next available TLV */
	ulong		shared_addr;	/* dongle address of pciedev_shared_t */
	pciedev_shared_t *pcie_sh;	/* host copy of the dongle shared structure */
	uint32		dma_rxoffset;
	volatile char	*regs;		/* pci device memory va */
	volatile char	*tcm;		/* pci device memory va */
	uint32		bar1_size;	/* pci device memory size */
	uint32		curr_bar1_win;	/* current PCIEBar1Window setting */
	osl_t		*osh;		/* OS abstraction layer handle */
	uint32		nvram_csm;	/* Nvram checksum */
	uint16		pollrate;
	uint16		polltick;

	volatile uint32	*pcie_mb_intr_addr;	/* mapped h2d mailbox doorbell register */
	volatile uint32	*pcie_mb_intr_2_addr;	/* mapped second h2d doorbell register */
	void		*pcie_mb_intr_osh;
	bool		sleep_allowed;

	wake_counts_t	wake_counts;	/* host-wake statistics */

	/* version 3 shared struct related info start */
	ring_sh_info_t	ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS];

	uint8		h2d_ring_count;
	uint8		d2h_ring_count;
	uint32		ringmem_ptr;
	uint32		ring_state_ptr;

	uint32		d2h_dma_scratch_buffer_mem_addr;

	uint32		h2d_mb_data_ptr_addr;
	uint32		d2h_mb_data_ptr_addr;
	/* version 3 shared struct related info end */

	uint32		def_intmask;
	uint32		d2h_mb_mask;
	uint32		pcie_mailbox_mask;
	uint32		pcie_mailbox_int;
	bool		ltrsleep_on_unload;
	uint		wait_for_d3_ack;
	uint16		max_tx_flowrings;
	uint16		max_submission_rings;
	uint16		max_completion_rings;
	uint16		max_cmn_rings;
	uint32		rw_index_sz;
	uint32		hwa_db_index_sz;
	bool		db1_for_mb;	/* use doorbell 1 for mailbox signalling */

	dhd_timeout_t	doorbell_timer;
	bool		device_wake_state;
	bool		irq_registered;
	bool		d2h_intr_method;	/* PCIE_INTX or PCIE_MSI — confirm against users */
#ifdef SUPPORT_LINKDOWN_RECOVERY
#if defined(CONFIG_ARCH_MSM) || (defined(CONFIG_ARCH_EXYNOS) && \
	!defined(SUPPORT_EXYNOS7420))
#ifdef CONFIG_ARCH_MSM
	uint8		no_cfg_restore;
#endif /* CONFIG_ARCH_MSM */
	struct_pcie_register_event pcie_event;	/* platform link-event registration */
#endif /* CONFIG_ARCH_MSM || CONFIG_ARCH_EXYNOS && !SUPPORT_EXYNOS7420 */
	bool		read_shm_fail;
#endif /* SUPPORT_LINKDOWN_RECOVERY */
	int32		idletime;	/* Control for activity timeout */
	bool		rpm_enabled;
#ifdef DHD_PCIE_RUNTIMEPM
	int32		idlecount;	/* Activity timeout counter */
	int32		bus_wake;	/* For wake up the bus */
	bool		runtime_resume_done;	/* For check runtime suspend end */
	struct mutex	pm_lock;	/* Synchronize for system PM & runtime PM */
	wait_queue_head_t rpm_queue;	/* wait-queue for bus wake up */
#endif /* DHD_PCIE_RUNTIMEPM */
	uint32		d3_inform_cnt;
	uint32		d0_inform_cnt;
	uint32		d0_inform_in_use_cnt;
	uint8		force_suspend;
	uint8		is_linkdown;
	uint8		no_bus_init;
#ifdef IDLE_TX_FLOW_MGMT
	bool		enable_idle_flowring_mgmt;
#endif /* IDLE_TX_FLOW_MGMT */
	struct dhd_pcie_rev api;	/* per-FW-revision mailbox handler */
	bool		use_mailbox;
	bool		use_d0_inform;
	void		*bus_lp_state_lock;	/* guards bus_low_power_state */
	void		*pwr_req_lock;
	bool		bar1_switch_enab;
	void		*bar1_switch_lock;
	void		*backplane_access_lock;
	enum dhd_bus_low_power_state bus_low_power_state;
	dhd_ds_trace_t	ds_trace[MAX_DS_TRACE_SIZE];	/* DS protocol trace ring */
	uint32		ds_trace_count;
	uint32		hostready_count;	/* Number of hostready issued */
#if defined(BCMPCIE_OOB_HOST_WAKE)
	bool		oob_presuspend;
#endif
	dhdpcie_config_save_t saved_config;	/* PCIe config space snapshot */
	ulong		resume_intr_enable_count;
	ulong		dpc_intr_enable_count;
	ulong		isr_intr_disable_count;
	ulong		suspend_intr_disable_count;
	ulong		dpc_return_busdown_count;
	ulong		non_ours_irq_count;
#ifdef BCMPCIE_OOB_HOST_WAKE
	ulong		oob_intr_count;
	ulong		oob_intr_enable_count;
	ulong		oob_intr_disable_count;
	uint64		last_oob_irq_isr_time;
	uint64		last_oob_irq_thr_time;
	uint64		last_oob_irq_enable_time;
	uint64		last_oob_irq_disable_time;
#endif /* BCMPCIE_OOB_HOST_WAKE */
	/* timestamps for debugging ISR/DPC/suspend-resume latencies */
	uint64		isr_entry_time;
	uint64		isr_exit_time;
	uint64		isr_sched_dpc_time;
	uint64		rpm_sched_dpc_time;
	uint64		dpc_entry_time;
	uint64		dpc_exit_time;
	uint64		resched_dpc_time;
	uint64		last_d3_inform_time;
	uint64		last_process_ctrlbuf_time;
	uint64		last_process_flowring_time;
	uint64		last_process_txcpl_time;
	uint64		last_process_rxcpl_time;
	uint64		last_process_infocpl_time;
	uint64		last_process_edl_time;
	uint64		last_suspend_start_time;
	uint64		last_suspend_end_time;
	uint64		last_resume_start_time;
	uint64		last_resume_end_time;
	uint64		last_non_ours_irq_time;
	bool		hwa_enabled;
	bool		idma_enabled;
	bool		ifrm_enabled;
	bool		dar_enabled;
	uint32		dmaxfer_complete;
	uint8		dw_option;	/* DEVICE_WAKE_NONE/OOB/INB */
#ifdef PCIE_INB_DW
	bool		inb_enabled;
	uint32		ds_exit_timeout;
	uint32		host_sleep_exit_timeout;
	uint		wait_for_ds_exit;
	uint32		inband_dw_assert_cnt;	/* # of inband device_wake assert */
	uint32		inband_dw_deassert_cnt;	/* # of inband device_wake deassert */
	uint32		inband_ds_exit_host_cnt;	/* # of DS-EXIT , host initiated */
	uint32		inband_ds_exit_device_cnt;	/* # of DS-EXIT , device initiated */
	uint32		inband_ds_exit_to_cnt;	/* # of DS-EXIT timeout */
	uint32		inband_host_sleep_exit_to_cnt;	/* # of Host_Sleep exit timeout */
	void		*inb_lock;	/* Lock to serialize in band device wake activity */
	/* # of contexts in the host which currently want a FW transaction */
	uint32		host_active_cnt;
	bool		skip_ds_ack;	/* Skip DS-ACK during suspend in progress */
#endif /* PCIE_INB_DW */
#if defined(PCIE_INB_DW)
	bool		ds_enabled;
#endif
#ifdef DHD_PCIE_RUNTIMEPM
	bool		chk_pm;	/* To avoid counting of wake up from Runtime PM */
#endif /* DHD_PCIE_RUNTIMEPM */
#if defined(PCIE_INB_DW)
	bool		calc_ds_exit_latency;
	uint64		ds_exit_latency;
	uint64		ds_exit_ts1;
	uint64		ds_exit_ts2;
#endif /* PCIE_INB_DW */
	bool		_dar_war;	/* DAR WAR for revs < 64 (see DAR_PWRREQ) */
#ifdef GDB_PROXY
	/* True if firmware loaded and backplane accessible */
	bool		gdb_proxy_access_enabled;
	/* ID set by last "gdb_proxy_probe" iovar */
	uint32		gdb_proxy_last_id;
	/* True if firmware was started in bootloader mode */
	bool		gdb_proxy_bootloader_mode;
#endif /* GDB_PROXY */
	uint8		dma_chan;

	bool		cto_enable;	/* enable PCIE CTO Prevention and recovery */
	uint32		cto_threshold;	/* PCIE CTO timeout threshold */
	bool		cto_triggered;	/* CTO is triggered */
	bool		intr_enabled;	/* ready to receive interrupts from dongle */
	int		pwr_req_ref;
	bool		flr_force_fail;	/* user intends to simulate flr force fail */

	/* Information used to compose the memory map and to write the memory map,
	 * FW, and FW signature to dongle RAM.
	 * This information is used by the bootloader.
	 */
	uint32		ramtop_addr;	/* Dongle address of unused space at top of RAM */
	uint32		fw_download_addr;	/* Dongle address of FW download */
	uint32		fw_download_len;	/* Length in bytes of FW download */
	uint32		fwsig_download_addr;	/* Dongle address of FW signature download */
	uint32		fwsig_download_len;	/* Length in bytes of FW signature download */
	uint32		fwstat_download_addr;	/* Dongle address of FWS status download */
	uint32		fwstat_download_len;	/* Length in bytes of FWS status download */
	uint32		fw_memmap_download_addr;	/* Dongle address of FWS memory-info download */
	uint32		fw_memmap_download_len;	/* Length in bytes of FWS memory-info download */

	char		fwsig_filename[DHD_FILENAME_MAX];	/* Name of FW signature file */
	char		bootloader_filename[DHD_FILENAME_MAX];	/* Name of bootloader image file */
	uint32		bootloader_addr;	/* Dongle address of bootloader download */
	bool		force_bt_quiesce;	/* send bt_quiesce command to BT driver. */
	bool		rc_ep_aspm_cap;	/* RC and EP ASPM capable */
	bool		rc_ep_l1ss_cap;	/* RC and EP L1SS capable */
#if defined(DHD_H2D_LOG_TIME_SYNC)
	ulong		dhd_rte_time_sync_count;	/* OSL_SYSUPTIME_US() */
#endif /* DHD_H2D_LOG_TIME_SYNC */
	uint16		hp2p_txcpl_max_items;
	uint16		hp2p_rxcpl_max_items;
	/* PCIE coherent status */
	uint32		coherent_state;
	uint32		inb_dw_deassert_cnt;
	uint64		arm_oor_time;
	uint64		rd_shared_pass_time;
} dhd_bus_t;
497 | ||
498 | #ifdef DHD_MSI_SUPPORT | |
499 | extern uint enable_msi; | |
500 | #endif /* DHD_MSI_SUPPORT */ | |
501 | ||
/* PCIe host interrupt delivery types */
enum {
	PCIE_INTX = 0,	/* legacy line-based (INTx) interrupts */
	PCIE_MSI = 1	/* message-signalled interrupts */
};
506 | ||
507 | static INLINE bool | |
508 | __dhd_check_bus_in_lps(dhd_bus_t *bus) | |
509 | { | |
510 | bool ret = (bus->bus_low_power_state == DHD_BUS_D3_INFORM_SENT) || | |
511 | (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED); | |
512 | return ret; | |
513 | } | |
514 | ||
515 | static INLINE bool | |
516 | dhd_check_bus_in_lps(dhd_bus_t *bus) | |
517 | { | |
518 | unsigned long flags_bus; | |
519 | bool ret; | |
520 | DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus); | |
521 | ret = __dhd_check_bus_in_lps(bus); | |
522 | DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus); | |
523 | return ret; | |
524 | } | |
525 | ||
526 | static INLINE bool | |
527 | __dhd_check_bus_lps_d3_acked(dhd_bus_t *bus) | |
528 | { | |
529 | bool ret = (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED); | |
530 | return ret; | |
531 | } | |
532 | ||
533 | static INLINE bool | |
534 | dhd_check_bus_lps_d3_acked(dhd_bus_t *bus) | |
535 | { | |
536 | unsigned long flags_bus; | |
537 | bool ret; | |
538 | DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus); | |
539 | ret = __dhd_check_bus_lps_d3_acked(bus); | |
540 | DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus); | |
541 | return ret; | |
542 | } | |
543 | ||
544 | static INLINE void | |
545 | __dhd_set_bus_not_in_lps(dhd_bus_t *bus) | |
546 | { | |
547 | bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE; | |
548 | return; | |
549 | } | |
550 | ||
551 | static INLINE void | |
552 | dhd_set_bus_not_in_lps(dhd_bus_t *bus) | |
553 | { | |
554 | unsigned long flags_bus; | |
555 | DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus); | |
556 | __dhd_set_bus_not_in_lps(bus); | |
557 | DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus); | |
558 | return; | |
559 | } | |
560 | ||
561 | static INLINE void | |
562 | __dhd_set_bus_lps_d3_informed(dhd_bus_t *bus) | |
563 | { | |
564 | bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT; | |
565 | return; | |
566 | } | |
567 | ||
568 | static INLINE void | |
569 | dhd_set_bus_lps_d3_informed(dhd_bus_t *bus) | |
570 | { | |
571 | unsigned long flags_bus; | |
572 | DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus); | |
573 | __dhd_set_bus_lps_d3_informed(bus); | |
574 | DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus); | |
575 | return; | |
576 | } | |
577 | ||
578 | static INLINE void | |
579 | __dhd_set_bus_lps_d3_acked(dhd_bus_t *bus) | |
580 | { | |
581 | bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED; | |
582 | return; | |
583 | } | |
584 | ||
585 | static INLINE void | |
586 | dhd_set_bus_lps_d3_acked(dhd_bus_t *bus) | |
587 | { | |
588 | unsigned long flags_bus; | |
589 | DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus); | |
590 | __dhd_set_bus_lps_d3_acked(bus); | |
591 | DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus); | |
592 | return; | |
593 | } | |
594 | ||
/* check routines */
#define DHD_CHK_BUS_IN_LPS(bus)			dhd_check_bus_in_lps(bus)
#define __DHD_CHK_BUS_IN_LPS(bus)		__dhd_check_bus_in_lps(bus)

/* Fully parenthesize the expansion (macro hygiene, CERT PRE02-C) so the
 * negation cannot re-associate with operators at the expansion site.
 */
#define DHD_CHK_BUS_NOT_IN_LPS(bus)		(!(DHD_CHK_BUS_IN_LPS(bus)))
#define __DHD_CHK_BUS_NOT_IN_LPS(bus)		(!(__DHD_CHK_BUS_IN_LPS(bus)))

/* "D3 informed" aliases "in LPS": both INFORM-sent and ACK-received
 * imply D3 INFORM has been issued.
 */
#define DHD_CHK_BUS_LPS_D3_INFORMED(bus)	DHD_CHK_BUS_IN_LPS(bus)
#define __DHD_CHK_BUS_LPS_D3_INFORMED(bus)	__DHD_CHK_BUS_IN_LPS(bus)

#define DHD_CHK_BUS_LPS_D3_ACKED(bus)		dhd_check_bus_lps_d3_acked(bus)
#define __DHD_CHK_BUS_LPS_D3_ACKED(bus)		__dhd_check_bus_lps_d3_acked(bus)

/* set routines */
#define DHD_SET_BUS_NOT_IN_LPS(bus)		dhd_set_bus_not_in_lps(bus)
#define __DHD_SET_BUS_NOT_IN_LPS(bus)		__dhd_set_bus_not_in_lps(bus)

#define DHD_SET_BUS_LPS_D3_INFORMED(bus)	dhd_set_bus_lps_d3_informed(bus)
#define __DHD_SET_BUS_LPS_D3_INFORMED(bus)	__dhd_set_bus_lps_d3_informed(bus)

#define DHD_SET_BUS_LPS_D3_ACKED(bus)		dhd_set_bus_lps_d3_acked(bus)
#define __DHD_SET_BUS_LPS_D3_ACKED(bus)		__dhd_set_bus_lps_d3_acked(bus)
617 | ||
618 | /* function declarations */ | |
619 | ||
620 | extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size); | |
621 | extern int dhdpcie_bus_register(void); | |
622 | extern void dhdpcie_bus_unregister(void); | |
623 | extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device); | |
624 | ||
625 | extern int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr, | |
626 | volatile char *regs, volatile char *tcm, void *pci_dev); | |
627 | extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size); | |
628 | extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data); | |
629 | extern void dhdpcie_bus_intr_enable(struct dhd_bus *bus); | |
630 | extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus); | |
631 | extern int dhpcie_bus_mask_interrupt(dhd_bus_t *bus); | |
632 | extern void dhdpcie_bus_release(struct dhd_bus *bus); | |
633 | extern int32 dhdpcie_bus_isr(struct dhd_bus *bus); | |
634 | extern void dhdpcie_free_irq(dhd_bus_t *bus); | |
635 | extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value); | |
636 | extern void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake); | |
637 | extern void dhdpcie_dongle_reset(dhd_bus_t *bus); | |
638 | extern int dhd_bus_cfg_sprom_ctrl_bp_reset(struct dhd_bus *bus); | |
639 | extern int dhd_bus_cfg_ss_ctrl_bp_reset(struct dhd_bus *bus); | |
640 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
641 | extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint); | |
642 | #else | |
643 | extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state); | |
644 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
645 | extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state); | |
646 | extern uint32 dhdpcie_force_alp(struct dhd_bus *bus, bool enable); | |
647 | extern uint32 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int force_l1_entry_time); | |
648 | extern bool dhdpcie_tcm_valid(dhd_bus_t *bus); | |
649 | extern void dhdpcie_pme_active(osl_t *osh, bool enable); | |
650 | extern bool dhdpcie_pme_cap(osl_t *osh); | |
651 | extern uint32 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val); | |
652 | extern void dhdpcie_set_pmu_min_res_mask(struct dhd_bus *bus, uint min_res_mask); | |
653 | extern uint8 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val); | |
654 | extern int dhdpcie_disable_irq(dhd_bus_t *bus); | |
655 | extern int dhdpcie_disable_irq_nosync(dhd_bus_t *bus); | |
656 | extern int dhdpcie_enable_irq(dhd_bus_t *bus); | |
657 | ||
658 | extern void dhd_bus_dump_dar_registers(struct dhd_bus *bus); | |
659 | ||
660 | extern uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset); | |
661 | extern uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, | |
662 | bool is_write, uint32 writeval); | |
663 | extern uint32 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, | |
664 | bool is_write, uint32 writeval); | |
665 | extern uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus); | |
666 | extern int dhdpcie_start_host_dev(dhd_bus_t *bus); | |
667 | extern int dhdpcie_stop_host_dev(dhd_bus_t *bus); | |
668 | extern int dhdpcie_disable_device(dhd_bus_t *bus); | |
669 | extern int dhdpcie_alloc_resource(dhd_bus_t *bus); | |
670 | extern void dhdpcie_free_resource(dhd_bus_t *bus); | |
671 | extern void dhdpcie_dump_resource(dhd_bus_t *bus); | |
672 | extern int dhdpcie_bus_request_irq(struct dhd_bus *bus); | |
673 | void dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr); | |
674 | void dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data); | |
675 | uint8 dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset); | |
676 | void dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data); | |
677 | uint16 dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset); | |
678 | void dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data); | |
679 | uint32 dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset); | |
680 | #ifdef DHD_SUPPORT_64BIT | |
681 | void dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data); | |
682 | uint64 dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset); | |
683 | #endif | |
684 | ||
685 | extern int dhdpcie_enable_device(dhd_bus_t *bus); | |
686 | ||
687 | #ifdef BCMPCIE_OOB_HOST_WAKE | |
688 | extern int dhdpcie_oob_intr_register(dhd_bus_t *bus); | |
689 | extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus); | |
690 | extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable); | |
691 | extern int dhdpcie_get_oob_irq_num(struct dhd_bus *bus); | |
692 | extern int dhdpcie_get_oob_irq_status(struct dhd_bus *bus); | |
693 | extern int dhdpcie_get_oob_irq_level(void); | |
694 | #endif /* BCMPCIE_OOB_HOST_WAKE */ | |
695 | #if defined(PCIE_INB_DW) | |
696 | extern void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus); | |
697 | #endif | |
698 | ||
699 | /* XXX: SWWLAN-82173 Making PCIe RC D3cold by force during system PM | |
700 | * exynos_pcie_pm_suspend : RC goes to suspend status & assert PERST | |
701 | * exynos_pcie_pm_resume : de-assert PERST & RC goes to resume status | |
702 | */ | |
703 | #if defined(CONFIG_ARCH_EXYNOS) | |
704 | #define SAMSUNG_PCIE_VENDOR_ID 0x144d | |
705 | #if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS7420) | |
706 | #define SAMSUNG_PCIE_DEVICE_ID 0xa575 | |
707 | #define SAMSUNG_PCIE_CH_NUM 1 | |
708 | #elif defined(CONFIG_SOC_EXYNOS8890) | |
709 | #define SAMSUNG_PCIE_DEVICE_ID 0xa544 | |
710 | #define SAMSUNG_PCIE_CH_NUM 0 | |
711 | #elif defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \ | |
712 | defined(CONFIG_SOC_EXYNOS9820) || defined(CONFIG_SOC_EXYNOS9830) | |
713 | #define SAMSUNG_PCIE_DEVICE_ID 0xecec | |
714 | #define SAMSUNG_PCIE_CH_NUM 0 | |
715 | #else | |
716 | #error "Not supported platform" | |
717 | #endif /* CONFIG_SOC_EXYNOSXXXX & CONFIG_MACH_UNIVERSALXXXX */ | |
718 | extern void exynos_pcie_pm_suspend(int ch_num); | |
719 | extern void exynos_pcie_pm_resume(int ch_num); | |
720 | #endif /* CONFIG_ARCH_EXYNOS */ | |
721 | ||
722 | #if defined(CONFIG_ARCH_MSM) | |
723 | #define MSM_PCIE_VENDOR_ID 0x17cb | |
724 | #if defined(CONFIG_ARCH_APQ8084) | |
725 | #define MSM_PCIE_DEVICE_ID 0x0101 | |
726 | #elif defined(CONFIG_ARCH_MSM8994) | |
727 | #define MSM_PCIE_DEVICE_ID 0x0300 | |
728 | #elif defined(CONFIG_ARCH_MSM8996) | |
729 | #define MSM_PCIE_DEVICE_ID 0x0104 | |
730 | #elif defined(CONFIG_ARCH_MSM8998) | |
731 | #define MSM_PCIE_DEVICE_ID 0x0105 | |
732 | #elif defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150) || \ | |
733 | defined(CONFIG_ARCH_KONA) | |
734 | #define MSM_PCIE_DEVICE_ID 0x0106 | |
735 | #else | |
736 | #error "Not supported platform" | |
737 | #endif | |
738 | #endif /* CONFIG_ARCH_MSM */ | |
739 | ||
/* RC vendor/device IDs for the remaining supported host platforms.
 * NOTE(review): the specific device IDs below presumably match the RC
 * of each reference board — confirm against the board's lspci output.
 */
#if defined(CONFIG_X86)
#define X86_PCIE_VENDOR_ID 0x8086	/* Intel */
#define X86_PCIE_DEVICE_ID 0x9c1a
#endif /* CONFIG_X86 */

#if defined(CONFIG_ARCH_TEGRA)
#define TEGRA_PCIE_VENDOR_ID 0x14e4	/* Broadcom */
#define TEGRA_PCIE_DEVICE_ID 0x4347
#endif /* CONFIG_ARCH_TEGRA */

#if defined(BOARD_HIKEY)
#define HIKEY_PCIE_VENDOR_ID 0x19e5	/* HiSilicon */
#define HIKEY_PCIE_DEVICE_ID 0x3660
#endif /* BOARD_HIKEY */

/* Fallback IDs for platforms with no known RC match (0xffff never
 * matches a real PCI vendor/device).
 */
#define DUMMY_PCIE_VENDOR_ID 0xffff
#define DUMMY_PCIE_DEVICE_ID 0xffff
757 | ||
/* Select the generic PCIE_RC_{VENDOR,DEVICE}_ID aliases for the host
 * platform being built; exactly one branch of this chain is taken.
 */
#if defined(CONFIG_ARCH_EXYNOS)
#define PCIE_RC_VENDOR_ID SAMSUNG_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID SAMSUNG_PCIE_DEVICE_ID
#elif defined(CONFIG_ARCH_MSM)
#define PCIE_RC_VENDOR_ID MSM_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID MSM_PCIE_DEVICE_ID
#elif defined(CONFIG_X86)
#define PCIE_RC_VENDOR_ID X86_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID X86_PCIE_DEVICE_ID
#elif defined(CONFIG_ARCH_TEGRA)
#define PCIE_RC_VENDOR_ID TEGRA_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID TEGRA_PCIE_DEVICE_ID
#elif defined(BOARD_HIKEY)
#define PCIE_RC_VENDOR_ID HIKEY_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID HIKEY_PCIE_DEVICE_ID
#else
/* Use dummy vendor and device IDs */
#define PCIE_RC_VENDOR_ID DUMMY_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID DUMMY_PCIE_DEVICE_ID
#endif /* CONFIG_ARCH_EXYNOS ... BOARD_HIKEY */
778 | ||
/* Flow-ring classes: regular traffic vs. high-priority peer-to-peer */
#define DHD_REGULAR_RING 0
#define DHD_HP2P_RING 1

#ifdef CONFIG_ARCH_TEGRA
/* Exported by the Tegra PCIe RC driver for host-initiated PM */
extern int tegra_pcie_pm_suspend(void);
extern int tegra_pcie_pm_resume(void);
#endif /* CONFIG_ARCH_TEGRA */
786 | ||
extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus);
#ifdef IDLE_TX_FLOW_MGMT
/* Flow-ring suspend/resume handshake with the dongle: the *_request
 * calls originate on the host, the *_response calls are invoked when
 * the dongle acknowledges.
 */
extern int dhd_bus_flow_ring_resume_request(struct dhd_bus *bus, void *arg);
extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status);
extern int dhd_bus_flow_ring_suspend_request(struct dhd_bus *bus, void *arg);
extern void dhd_bus_flow_ring_suspend_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
/* Active-list bookkeeping for idle-TX flow-ring management; the
 * double-underscore variant presumably assumes the list lock is
 * already held — confirm in dhd_pcie.c.
 */
extern void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus,
	flow_ring_node_t *flow_ring_node);
extern void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus,
	flow_ring_node_t *flow_ring_node);
extern void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
	flow_ring_node_t *flow_ring_node);
extern void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
	flow_ring_node_t *flow_ring_node);
#endif /* IDLE_TX_FLOW_MGMT */

/* Send host-to-dongle mailbox data over the PCIe bus */
extern int dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);

#ifdef DHD_WAKE_STATUS
int bcmpcie_get_total_wake(struct dhd_bus *bus);
int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag);
#endif /* DHD_WAKE_STATUS */
extern void dhd_dump_bus_ds_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
extern bool dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus);
extern void dhd_bus_hostready(struct dhd_bus *bus);
#ifdef PCIE_INB_DW
/* Inband device-wake (deep-sleep) state machine accessors */
extern bool dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus);
extern void dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus,
	enum dhd_bus_ds_state state);
extern enum dhd_bus_ds_state dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus);
/* Human-readable names for deep-sleep states/values (debug dumps) */
extern const char * dhd_convert_inb_state_names(enum dhd_bus_ds_state inbstate);
extern const char * dhd_convert_dsval(uint32 val, bool d2h);
extern int dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val);
extern void dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus);
#endif /* PCIE_INB_DW */
extern void dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option);
extern int dhdpcie_irq_disabled(struct dhd_bus *bus);
extern int dhdpcie_set_master_and_d0_pwrstate(struct dhd_bus *bus);
825 | ||
826 | static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;} | |
827 | static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; } | |
828 | static INLINE void | |
829 | dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t *bus) | |
830 | { return; } | |
831 | ||
/* PCIe config-space sanity check and save/restore around bus PM
 * transitions; dhdpcie_set_pwr_state programs the device power state.
 */
int dhdpcie_config_check(dhd_bus_t *bus);
int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr);
int dhdpcie_config_save(dhd_bus_t *bus);
int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state);

/* Capability queries for optional dongle-side PCIe features */
extern bool dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus);
extern bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus);
extern bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus);
extern bool dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus);
extern bool dhdpcie_bus_get_hp2p_supported(dhd_bus_t *bus);
842 | ||
843 | static INLINE uint32 | |
844 | dhd_pcie_config_read(dhd_bus_t *bus, uint offset, uint size) | |
845 | { | |
846 | /* For 4375 or prior chips to 4375 */ | |
847 | if (bus->sih->buscorerev <= 64) { | |
848 | OSL_DELAY(100); | |
849 | } | |
850 | return OSL_PCI_READ_CONFIG(bus->osh, offset, size); | |
851 | } | |
852 | ||
853 | static INLINE uint32 | |
854 | dhd_pcie_corereg_read(si_t *sih, uint val) | |
855 | { | |
856 | /* For 4375 or prior chips to 4375 */ | |
857 | if (sih->buscorerev <= 64) { | |
858 | OSL_DELAY(100); | |
859 | } | |
860 | si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, val); | |
861 | return si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0); | |
862 | } | |
863 | ||
/* Resolve firmware/NVRAM/CLM/txcap image paths from OTP contents */
extern int dhdpcie_get_fwpath_otp(dhd_bus_t *bus, char *fw_path, char *nv_path,
	char *clm_path, char *txcap_path);

/* Debug dumps: PCIe link/bus info and interrupt counters */
extern int dhd_pcie_debug_info_dump(dhd_pub_t *dhd);
extern void dhd_pcie_intr_count_dump(dhd_pub_t *dhd);
extern void dhdpcie_bus_clear_intstatus(dhd_bus_t *bus);

extern int dhd_get_pcie_linkspeed(dhd_pub_t *dhd);
extern void dhdpcie_bar1_window_switch_enab(dhd_bus_t *bus);
873 | ||
874 | #endif /* dhd_pcie_h */ |