2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
5 * Copyright (C) 1999-2018, Broadcom.
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
26 * <<Broadcom-WL-IPTag/Open:>>
28 * $Id: aiutils.c 769534 2018-06-26 21:19:11Z $
40 #include "siutils_priv.h"
43 #define BCM53573_DMP() (0)
44 #define BCM4707_DMP() (0)
48 #if defined(BCM_BACKPLANE_TIMEOUT)
49 static bool ai_get_apb_bridge(si_t
*sih
, uint32 coreidx
, uint32
*apb_id
, uint32
*apb_coreuinit
);
50 #endif /* BCM_BACKPLANE_TIMEOUT */
52 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
53 static void ai_reset_axi_to(si_info_t
*sii
, aidmp_t
*ai
);
54 #endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */
59 get_erom_ent(si_t
*sih
, uint32
**eromptr
, uint32 mask
, uint32 match
)
62 uint inv
= 0, nom
= 0;
66 ent
= R_REG(si_osh(sih
), *eromptr
);
72 if ((ent
& ER_VALID
) == 0) {
77 if (ent
== (ER_END
| ER_VALID
))
80 if ((ent
& mask
) == match
)
83 /* escape condition related EROM size if it has invalid values */
84 size
+= sizeof(*eromptr
);
85 if (size
>= ER_SZ_MAX
) {
86 SI_ERROR(("Failed to find end of EROM marker\n"));
93 SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__
, ent
));
95 SI_VMSG((" after %d invalid and %d non-matching entries\n", inv
, nom
));
101 get_asd(si_t
*sih
, uint32
**eromptr
, uint sp
, uint ad
, uint st
, uint32
*addrl
, uint32
*addrh
,
102 uint32
*sizel
, uint32
*sizeh
)
108 asd
= get_erom_ent(sih
, eromptr
, ER_VALID
, ER_VALID
);
109 if (((asd
& ER_TAG1
) != ER_ADD
) ||
110 (((asd
& AD_SP_MASK
) >> AD_SP_SHIFT
) != sp
) ||
111 ((asd
& AD_ST_MASK
) != st
)) {
112 /* This is not what we want, "push" it back */
116 *addrl
= asd
& AD_ADDR_MASK
;
118 *addrh
= get_erom_ent(sih
, eromptr
, 0, 0);
122 sz
= asd
& AD_SZ_MASK
;
123 if (sz
== AD_SZ_SZD
) {
124 szd
= get_erom_ent(sih
, eromptr
, 0, 0);
125 *sizel
= szd
& SD_SZ_MASK
;
127 *sizeh
= get_erom_ent(sih
, eromptr
, 0, 0);
129 *sizel
= AD_SZ_BASE
<< (sz
>> AD_SZ_SHIFT
);
131 SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
132 sp
, ad
, st
, *sizeh
, *sizel
, *addrh
, *addrl
));
137 /* Parse the enumeration rom to identify all cores
138 * Erom content format can be found in:
139 * http://hwnbu-twiki.broadcom.com/twiki/pub/Mwgroup/ArmDocumentation/SystemDiscovery.pdf
142 ai_scan(si_t
*sih
, void *regs
, uint devid
)
144 si_info_t
*sii
= SI_INFO(sih
);
145 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
146 chipcregs_t
*cc
= (chipcregs_t
*)regs
;
147 uint32 erombase
, *eromptr
, *eromlim
;
148 axi_wrapper_t
* axi_wrapper
= sii
->axi_wrapper
;
150 BCM_REFERENCE(devid
);
152 erombase
= R_REG(sii
->osh
, &cc
->eromptr
);
154 switch (BUSTYPE(sih
->bustype
)) {
156 eromptr
= (uint32
*)REG_MAP(erombase
, SI_CORE_SIZE
);
160 /* Set wrappers address */
161 sii
->curwrap
= (void *)((uintptr
)regs
+ SI_CORE_SIZE
);
163 /* Now point the window at the erom */
164 OSL_PCI_WRITE_CONFIG(sii
->osh
, PCI_BAR0_WIN
, 4, erombase
);
171 eromptr
= (uint32
*)(uintptr
)erombase
;
177 SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih
->bustype
));
181 eromlim
= eromptr
+ (ER_REMAPCONTROL
/ sizeof(uint32
));
182 sii
->axi_num_wrappers
= 0;
184 SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
185 OSL_OBFUSCATE_BUF(regs
), erombase
,
186 OSL_OBFUSCATE_BUF(eromptr
), OSL_OBFUSATE_BUF(eromlim
)));
187 while (eromptr
< eromlim
) {
188 uint32 cia
, cib
, cid
, mfg
, crev
, nmw
, nsw
, nmp
, nsp
;
189 uint32 mpd
, asd
, addrl
, addrh
, sizel
, sizeh
;
195 /* Grok a component */
196 cia
= get_erom_ent(sih
, &eromptr
, ER_TAG
, ER_CI
);
197 if (cia
== (ER_END
| ER_VALID
)) {
198 SI_VMSG(("Found END of erom after %d cores\n", sii
->numcores
));
202 cib
= get_erom_ent(sih
, &eromptr
, 0, 0);
204 if ((cib
& ER_TAG
) != ER_CI
) {
205 SI_ERROR(("CIA not followed by CIB\n"));
209 cid
= (cia
& CIA_CID_MASK
) >> CIA_CID_SHIFT
;
210 mfg
= (cia
& CIA_MFG_MASK
) >> CIA_MFG_SHIFT
;
211 crev
= (cib
& CIB_REV_MASK
) >> CIB_REV_SHIFT
;
212 nmw
= (cib
& CIB_NMW_MASK
) >> CIB_NMW_SHIFT
;
213 nsw
= (cib
& CIB_NSW_MASK
) >> CIB_NSW_SHIFT
;
214 nmp
= (cib
& CIB_NMP_MASK
) >> CIB_NMP_SHIFT
;
215 nsp
= (cib
& CIB_NSP_MASK
) >> CIB_NSP_SHIFT
;
218 SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
219 "nsw = %d, nmp = %d & nsp = %d\n",
220 mfg
, cid
, crev
, OSL_OBFUSCATE_BUF(eromptr
- 1), nmw
, nsw
, nmp
, nsp
));
225 if (BCM4347_CHIP(sih
->chip
)) {
226 /* 4347 has more entries for ARM core
227 * This should apply to all chips but crashes on router
228 * This is a temp fix to be further analyze
234 /* Include Default slave wrapper for timeout monitoring */
236 #if !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT)
237 ((mfg
== MFGID_ARM
) && (cid
== DEF_AI_COMP
)) ||
239 ((CHIPTYPE(sii
->pub
.socitype
) == SOCI_NAI
) &&
240 (mfg
== MFGID_ARM
) && (cid
== DEF_AI_COMP
)) ||
241 #endif /* !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT) */
247 if ((nmw
+ nsw
== 0)) {
248 /* A component which is not a core */
249 if (cid
== OOB_ROUTER_CORE_ID
) {
250 asd
= get_asd(sih
, &eromptr
, 0, 0, AD_ST_SLAVE
,
251 &addrl
, &addrh
, &sizel
, &sizeh
);
253 sii
->oob_router
= addrl
;
256 if (cid
!= NS_CCB_CORE_ID
&&
257 cid
!= PMU_CORE_ID
&& cid
!= GCI_CORE_ID
&& cid
!= SR_CORE_ID
&&
264 cores_info
->cia
[idx
] = cia
;
265 cores_info
->cib
[idx
] = cib
;
266 cores_info
->coreid
[idx
] = cid
;
268 for (i
= 0; i
< nmp
; i
++) {
269 mpd
= get_erom_ent(sih
, &eromptr
, ER_VALID
, ER_VALID
);
270 if ((mpd
& ER_TAG
) != ER_MP
) {
271 SI_ERROR(("Not enough MP entries for component 0x%x\n", cid
));
274 SI_VMSG((" Master port %d, mp: %d id: %d\n", i
,
275 (mpd
& MPD_MP_MASK
) >> MPD_MP_SHIFT
,
276 (mpd
& MPD_MUI_MASK
) >> MPD_MUI_SHIFT
));
279 /* First Slave Address Descriptor should be port 0:
280 * the main register space for the core
282 asd
= get_asd(sih
, &eromptr
, 0, 0, AD_ST_SLAVE
, &addrl
, &addrh
, &sizel
, &sizeh
);
285 /* Try again to see if it is a bridge */
286 asd
= get_asd(sih
, &eromptr
, 0, 0, AD_ST_BRIDGE
, &addrl
, &addrh
,
294 else if ((addrh
!= 0) || (sizeh
!= 0) ||
295 (sizel
!= SI_CORE_SIZE
)) {
296 SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t size1 ="
297 "0x%x\n", addrh
, sizeh
, sizel
));
298 SI_ERROR(("First Slave ASD for"
299 "core 0x%04x malformed "
300 "(0x%08x)\n", cid
, asd
));
306 cores_info
->coresba
[idx
] = addrl
;
307 cores_info
->coresba_size
[idx
] = sizel
;
308 /* Get any more ASDs in first port */
311 asd
= get_asd(sih
, &eromptr
, 0, j
, AD_ST_SLAVE
, &addrl
, &addrh
,
313 if ((asd
!= 0) && (j
== 1) && (sizel
== SI_CORE_SIZE
)) {
314 cores_info
->coresba2
[idx
] = addrl
;
315 cores_info
->coresba2_size
[idx
] = sizel
;
320 /* Go through the ASDs for other slave ports */
321 for (i
= 1; i
< nsp
; i
++) {
324 asd
= get_asd(sih
, &eromptr
, i
, j
, AD_ST_SLAVE
, &addrl
, &addrh
,
326 /* To get the first base address of second slave port */
327 if ((asd
!= 0) && (i
== 1) && (j
== 0)) {
328 cores_info
->csp2ba
[idx
] = addrl
;
329 cores_info
->csp2ba_size
[idx
] = sizel
;
336 SI_ERROR((" SP %d has no address descriptors\n", i
));
341 /* Now get master wrappers */
342 for (i
= 0; i
< nmw
; i
++) {
343 asd
= get_asd(sih
, &eromptr
, i
, 0, AD_ST_MWRAP
, &addrl
, &addrh
,
346 SI_ERROR(("Missing descriptor for MW %d\n", i
));
349 if ((sizeh
!= 0) || (sizel
!= SI_CORE_SIZE
)) {
350 SI_ERROR(("Master wrapper %d is not 4KB\n", i
));
354 cores_info
->wrapba
[idx
] = addrl
;
356 cores_info
->wrapba2
[idx
] = addrl
;
358 cores_info
->wrapba3
[idx
] = addrl
;
362 (sii
->axi_num_wrappers
< SI_MAX_AXI_WRAPPERS
)) {
363 axi_wrapper
[sii
->axi_num_wrappers
].mfg
= mfg
;
364 axi_wrapper
[sii
->axi_num_wrappers
].cid
= cid
;
365 axi_wrapper
[sii
->axi_num_wrappers
].rev
= crev
;
366 axi_wrapper
[sii
->axi_num_wrappers
].wrapper_type
= AI_MASTER_WRAPPER
;
367 axi_wrapper
[sii
->axi_num_wrappers
].wrapper_addr
= addrl
;
368 sii
->axi_num_wrappers
++;
369 SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x,"
370 "rev:%x, addr:%x, size:%x\n",
371 sii
->axi_num_wrappers
, mfg
, cid
, crev
, addrl
, sizel
));
375 /* And finally slave wrappers */
376 for (i
= 0; i
< nsw
; i
++) {
377 uint fwp
= (nsp
== 1) ? 0 : 1;
378 asd
= get_asd(sih
, &eromptr
, fwp
+ i
, 0, AD_ST_SWRAP
, &addrl
, &addrh
,
381 /* cache APB bridge wrapper address for set/clear timeout */
382 if ((mfg
== MFGID_ARM
) && (cid
== APB_BRIDGE_ID
)) {
383 ASSERT(sii
->num_br
< SI_MAXBR
);
384 sii
->br_wrapba
[sii
->num_br
++] = addrl
;
388 SI_ERROR(("Missing descriptor for SW %d\n", i
));
391 if ((sizeh
!= 0) || (sizel
!= SI_CORE_SIZE
)) {
392 SI_ERROR(("Slave wrapper %d is not 4KB\n", i
));
395 if ((nmw
== 0) && (i
== 0)) {
396 cores_info
->wrapba
[idx
] = addrl
;
397 } else if ((nmw
== 0) && (i
== 1)) {
398 cores_info
->wrapba2
[idx
] = addrl
;
399 } else if ((nmw
== 0) && (i
== 2)) {
400 cores_info
->wrapba3
[idx
] = addrl
;
403 /* Include all slave wrappers to the list to
404 * enable and monitor watchdog timeouts
408 (sii
->axi_num_wrappers
< SI_MAX_AXI_WRAPPERS
)) {
409 axi_wrapper
[sii
->axi_num_wrappers
].mfg
= mfg
;
410 axi_wrapper
[sii
->axi_num_wrappers
].cid
= cid
;
411 axi_wrapper
[sii
->axi_num_wrappers
].rev
= crev
;
412 axi_wrapper
[sii
->axi_num_wrappers
].wrapper_type
= AI_SLAVE_WRAPPER
;
414 /* Software WAR as discussed with hardware team, to ensure proper
415 * Slave Wrapper Base address is set for 4364 Chip ID.
416 * Current address is 0x1810c000, Corrected the same to 0x1810e000.
417 * This ensures AXI default slave wrapper is registered along with
418 * other slave wrapper cores and is useful while generating trap info
419 * when write operation is tried on Invalid Core / Wrapper register
422 if ((CHIPID(sih
->chip
) == BCM4364_CHIP_ID
) &&
423 (cid
== DEF_AI_COMP
)) {
424 axi_wrapper
[sii
->axi_num_wrappers
].wrapper_addr
=
427 axi_wrapper
[sii
->axi_num_wrappers
].wrapper_addr
= addrl
;
430 sii
->axi_num_wrappers
++;
432 SI_VMSG(("SLAVE WRAPPER: %d, mfg:%x, cid:%x,"
433 "rev:%x, addr:%x, size:%x\n",
434 sii
->axi_num_wrappers
, mfg
, cid
, crev
, addrl
, sizel
));
438 #ifndef BCM_BACKPLANE_TIMEOUT
439 /* Don't record bridges */
448 SI_ERROR(("Reached end of erom without finding END\n"));
455 #define AI_SETCOREIDX_MAPSIZE(coreid) \
456 (((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)
458 /* This function changes the logical "focus" to the indicated core.
459 * Return the current core's virtual address.
461 static volatile void *
462 _ai_setcoreidx(si_t
*sih
, uint coreidx
, uint use_wrapn
)
464 si_info_t
*sii
= SI_INFO(sih
);
465 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
466 uint32 addr
, wrap
, wrap2
, wrap3
;
469 if (coreidx
>= MIN(sii
->numcores
, SI_MAXCORES
))
472 addr
= cores_info
->coresba
[coreidx
];
473 wrap
= cores_info
->wrapba
[coreidx
];
474 wrap2
= cores_info
->wrapba2
[coreidx
];
475 wrap3
= cores_info
->wrapba3
[coreidx
];
477 #ifdef BCM_BACKPLANE_TIMEOUT
478 /* No need to disable interrupts while entering/exiting APB bridge core */
479 if ((cores_info
->coreid
[coreidx
] != APB_BRIDGE_CORE_ID
) &&
480 (cores_info
->coreid
[sii
->curidx
] != APB_BRIDGE_CORE_ID
))
481 #endif /* BCM_BACKPLANE_TIMEOUT */
484 * If the user has provided an interrupt mask enabled function,
485 * then assert interrupts are disabled before switching the core.
487 ASSERT((sii
->intrsenabled_fn
== NULL
) ||
488 !(*(sii
)->intrsenabled_fn
)((sii
)->intr_arg
));
491 switch (BUSTYPE(sih
->bustype
)) {
494 if (!cores_info
->regs
[coreidx
]) {
495 cores_info
->regs
[coreidx
] = REG_MAP(addr
,
496 AI_SETCOREIDX_MAPSIZE(cores_info
->coreid
[coreidx
]));
497 ASSERT(GOODREGS(cores_info
->regs
[coreidx
]));
499 sii
->curmap
= regs
= cores_info
->regs
[coreidx
];
500 if (!cores_info
->wrappers
[coreidx
] && (wrap
!= 0)) {
501 cores_info
->wrappers
[coreidx
] = REG_MAP(wrap
, SI_CORE_SIZE
);
502 ASSERT(GOODREGS(cores_info
->wrappers
[coreidx
]));
504 if (!cores_info
->wrappers2
[coreidx
] && (wrap2
!= 0)) {
505 cores_info
->wrappers2
[coreidx
] = REG_MAP(wrap2
, SI_CORE_SIZE
);
506 ASSERT(GOODREGS(cores_info
->wrappers2
[coreidx
]));
508 if (!cores_info
->wrappers3
[coreidx
] && (wrap3
!= 0)) {
509 cores_info
->wrappers3
[coreidx
] = REG_MAP(wrap3
, SI_CORE_SIZE
);
510 ASSERT(GOODREGS(cores_info
->wrappers3
[coreidx
]));
513 if (use_wrapn
== 2) {
514 sii
->curwrap
= cores_info
->wrappers3
[coreidx
];
515 } else if (use_wrapn
== 1) {
516 sii
->curwrap
= cores_info
->wrappers2
[coreidx
];
518 sii
->curwrap
= cores_info
->wrappers
[coreidx
];
523 #ifdef BCM_BACKPLANE_TIMEOUT
524 /* No need to set the BAR0 if core is APB Bridge.
525 * This is to reduce 2 PCI writes while checkng for errlog
527 if (cores_info
->coreid
[coreidx
] != APB_BRIDGE_CORE_ID
)
528 #endif /* BCM_BACKPLANE_TIMEOUT */
530 /* point bar0 window */
531 OSL_PCI_WRITE_CONFIG(sii
->osh
, PCI_BAR0_WIN
, 4, addr
);
535 /* point bar0 2nd 4KB window to the primary wrapper */
539 OSL_PCI_WRITE_CONFIG(sii
->osh
, PCIE2_BAR0_WIN2
, 4, wrap
);
541 OSL_PCI_WRITE_CONFIG(sii
->osh
, PCI_BAR0_WIN2
, 4, wrap
);
547 sii
->curmap
= regs
= (void *)((uintptr
)addr
);
549 sii
->curwrap
= (void *)((uintptr
)wrap2
);
551 sii
->curwrap
= (void *)((uintptr
)wrap
);
563 sii
->curidx
= coreidx
;
569 ai_setcoreidx(si_t
*sih
, uint coreidx
)
571 return _ai_setcoreidx(sih
, coreidx
, 0);
575 ai_setcoreidx_2ndwrap(si_t
*sih
, uint coreidx
)
577 return _ai_setcoreidx(sih
, coreidx
, 1);
581 ai_setcoreidx_3rdwrap(si_t
*sih
, uint coreidx
)
583 return _ai_setcoreidx(sih
, coreidx
, 2);
587 ai_coreaddrspaceX(si_t
*sih
, uint asidx
, uint32
*addr
, uint32
*size
)
589 si_info_t
*sii
= SI_INFO(sih
);
590 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
591 chipcregs_t
*cc
= NULL
;
592 uint32 erombase
, *eromptr
, *eromlim
;
594 uint32 cia
, cib
, nmp
, nsp
;
595 uint32 asd
, addrl
, addrh
, sizel
, sizeh
;
597 for (i
= 0; i
< sii
->numcores
; i
++) {
598 if (cores_info
->coreid
[i
] == CC_CORE_ID
) {
599 cc
= (chipcregs_t
*)cores_info
->regs
[i
];
606 erombase
= R_REG(sii
->osh
, &cc
->eromptr
);
607 eromptr
= (uint32
*)REG_MAP(erombase
, SI_CORE_SIZE
);
608 eromlim
= eromptr
+ (ER_REMAPCONTROL
/ sizeof(uint32
));
611 cia
= cores_info
->cia
[cidx
];
612 cib
= cores_info
->cib
[cidx
];
614 nmp
= (cib
& CIB_NMP_MASK
) >> CIB_NMP_SHIFT
;
615 nsp
= (cib
& CIB_NSP_MASK
) >> CIB_NSP_SHIFT
;
618 while (eromptr
< eromlim
) {
619 if ((get_erom_ent(sih
, &eromptr
, ER_TAG
, ER_CI
) == cia
) &&
620 (get_erom_ent(sih
, &eromptr
, 0, 0) == cib
)) {
625 /* skip master ports */
626 for (i
= 0; i
< nmp
; i
++)
627 get_erom_ent(sih
, &eromptr
, ER_VALID
, ER_VALID
);
629 /* Skip ASDs in port 0 */
630 asd
= get_asd(sih
, &eromptr
, 0, 0, AD_ST_SLAVE
, &addrl
, &addrh
, &sizel
, &sizeh
);
632 /* Try again to see if it is a bridge */
633 asd
= get_asd(sih
, &eromptr
, 0, 0, AD_ST_BRIDGE
, &addrl
, &addrh
,
639 asd
= get_asd(sih
, &eromptr
, 0, j
, AD_ST_SLAVE
, &addrl
, &addrh
,
644 /* Go through the ASDs for other slave ports */
645 for (i
= 1; i
< nsp
; i
++) {
648 asd
= get_asd(sih
, &eromptr
, i
, j
, AD_ST_SLAVE
, &addrl
, &addrh
,
662 SI_ERROR((" SP %d has no address descriptors\n", i
));
672 /* Return the number of address spaces in current core */
674 ai_numaddrspaces(si_t
*sih
)
682 /* Return the address of the nth address space in the current core
684 * sih : Pointer to struct si_t
685 * spidx : slave port index
686 * baidx : base address index
689 ai_addrspace(si_t
*sih
, uint spidx
, uint baidx
)
691 si_info_t
*sii
= SI_INFO(sih
);
692 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
697 if (spidx
== CORE_SLAVE_PORT_0
) {
698 if (baidx
== CORE_BASE_ADDR_0
)
699 return cores_info
->coresba
[cidx
];
700 else if (baidx
== CORE_BASE_ADDR_1
)
701 return cores_info
->coresba2
[cidx
];
703 else if (spidx
== CORE_SLAVE_PORT_1
) {
704 if (baidx
== CORE_BASE_ADDR_0
)
705 return cores_info
->csp2ba
[cidx
];
708 SI_ERROR(("%s: Need to parse the erom again to find %d base addr in %d slave port\n",
709 __FUNCTION__
, baidx
, spidx
));
715 /* Return the size of the nth address space in the current core
717 * sih : Pointer to struct si_t
718 * spidx : slave port index
719 * baidx : base address index
722 ai_addrspacesize(si_t
*sih
, uint spidx
, uint baidx
)
724 si_info_t
*sii
= SI_INFO(sih
);
725 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
729 if (spidx
== CORE_SLAVE_PORT_0
) {
730 if (baidx
== CORE_BASE_ADDR_0
)
731 return cores_info
->coresba_size
[cidx
];
732 else if (baidx
== CORE_BASE_ADDR_1
)
733 return cores_info
->coresba2_size
[cidx
];
735 else if (spidx
== CORE_SLAVE_PORT_1
) {
736 if (baidx
== CORE_BASE_ADDR_0
)
737 return cores_info
->csp2ba_size
[cidx
];
740 SI_ERROR(("%s: Need to parse the erom again to find %d base addr in %d slave port\n",
741 __FUNCTION__
, baidx
, spidx
));
749 si_info_t
*sii
= SI_INFO(sih
);
753 SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
757 if (BCM53573_DMP()) {
758 SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__
));
764 ai_setcoreidx(sih
, SI_CC_IDX
);
765 flag
= ai_flag_alt(sih
);
766 ai_setcoreidx(sih
, idx
);
773 return (R_REG(sii
->osh
, &ai
->oobselouta30
) & 0x1f);
777 ai_flag_alt(si_t
*sih
)
779 si_info_t
*sii
= SI_INFO(sih
);
783 SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
790 return ((R_REG(sii
->osh
, &ai
->oobselouta30
) >> AI_OOBSEL_1_SHIFT
) & AI_OOBSEL_MASK
);
794 ai_setint(si_t
*sih
, int siflag
)
797 BCM_REFERENCE(siflag
);
802 ai_wrap_reg(si_t
*sih
, uint32 offset
, uint32 mask
, uint32 val
)
804 si_info_t
*sii
= SI_INFO(sih
);
805 uint32
*addr
= (uint32
*) ((uchar
*)(sii
->curwrap
) + offset
);
808 uint32 w
= R_REG(sii
->osh
, addr
);
811 W_REG(sii
->osh
, addr
, w
);
813 return (R_REG(sii
->osh
, addr
));
817 ai_corevendor(si_t
*sih
)
819 si_info_t
*sii
= SI_INFO(sih
);
820 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
823 cia
= cores_info
->cia
[sii
->curidx
];
824 return ((cia
& CIA_MFG_MASK
) >> CIA_MFG_SHIFT
);
828 ai_corerev(si_t
*sih
)
830 si_info_t
*sii
= SI_INFO(sih
);
831 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
834 cib
= cores_info
->cib
[sii
->curidx
];
835 return ((cib
& CIB_REV_MASK
) >> CIB_REV_SHIFT
);
839 ai_corerev_minor(si_t
*sih
)
841 return (ai_core_sflags(sih
, 0, 0) >> SISF_MINORREV_D11_SHIFT
) &
842 SISF_MINORREV_D11_MASK
;
846 ai_iscoreup(si_t
*sih
)
848 si_info_t
*sii
= SI_INFO(sih
);
853 return (((R_REG(sii
->osh
, &ai
->ioctrl
) & (SICF_FGC
| SICF_CLOCK_EN
)) == SICF_CLOCK_EN
) &&
854 ((R_REG(sii
->osh
, &ai
->resetctrl
) & AIRC_RESET
) == 0));
858 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
859 * switch back to the original core, and return the new value.
861 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
863 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
864 * and (on newer pci cores) chipcommon registers.
867 ai_corereg(si_t
*sih
, uint coreidx
, uint regoff
, uint mask
, uint val
)
870 volatile uint32
*r
= NULL
;
874 si_info_t
*sii
= SI_INFO(sih
);
875 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
877 ASSERT(GOODIDX(coreidx
));
878 ASSERT(regoff
< SI_CORE_SIZE
);
879 ASSERT((val
& ~mask
) == 0);
881 if (coreidx
>= SI_MAXCORES
)
884 if (BUSTYPE(sih
->bustype
) == SI_BUS
) {
885 /* If internal bus, we can always get at everything */
887 /* map if does not exist */
888 if (!cores_info
->regs
[coreidx
]) {
889 cores_info
->regs
[coreidx
] = REG_MAP(cores_info
->coresba
[coreidx
],
891 ASSERT(GOODREGS(cores_info
->regs
[coreidx
]));
893 r
= (volatile uint32
*)((volatile uchar
*)cores_info
->regs
[coreidx
] + regoff
);
894 } else if (BUSTYPE(sih
->bustype
) == PCI_BUS
) {
895 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
897 if ((cores_info
->coreid
[coreidx
] == CC_CORE_ID
) && SI_FAST(sii
)) {
898 /* Chipc registers are mapped at 12KB */
901 r
= (volatile uint32
*)((volatile char *)sii
->curmap
+
902 PCI_16KB0_CCREGS_OFFSET
+ regoff
);
903 } else if (sii
->pub
.buscoreidx
== coreidx
) {
904 /* pci registers are at either in the last 2KB of an 8KB window
905 * or, in pcie and pci rev 13 at 8KB
909 r
= (volatile uint32
*)((volatile char *)sii
->curmap
+
910 PCI_16KB0_PCIREGS_OFFSET
+ regoff
);
912 r
= (volatile uint32
*)((volatile char *)sii
->curmap
+
913 ((regoff
>= SBCONFIGOFF
) ?
914 PCI_BAR0_PCISBR_OFFSET
: PCI_BAR0_PCIREGS_OFFSET
) +
920 INTR_OFF(sii
, intr_val
);
922 /* save current core index */
923 origidx
= si_coreidx(&sii
->pub
);
926 r
= (volatile uint32
*) ((volatile uchar
*) ai_setcoreidx(&sii
->pub
, coreidx
) +
933 w
= (R_REG(sii
->osh
, r
) & ~mask
) | val
;
934 W_REG(sii
->osh
, r
, w
);
938 w
= R_REG(sii
->osh
, r
);
941 /* restore core index */
942 if (origidx
!= coreidx
)
943 ai_setcoreidx(&sii
->pub
, origidx
);
945 INTR_RESTORE(sii
, intr_val
);
952 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
953 * switch back to the original core, and return the new value.
955 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
957 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
958 * and (on newer pci cores) chipcommon registers.
961 ai_corereg_writeonly(si_t
*sih
, uint coreidx
, uint regoff
, uint mask
, uint val
)
964 volatile uint32
*r
= NULL
;
968 si_info_t
*sii
= SI_INFO(sih
);
969 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
971 ASSERT(GOODIDX(coreidx
));
972 ASSERT(regoff
< SI_CORE_SIZE
);
973 ASSERT((val
& ~mask
) == 0);
975 if (coreidx
>= SI_MAXCORES
)
978 if (BUSTYPE(sih
->bustype
) == SI_BUS
) {
979 /* If internal bus, we can always get at everything */
981 /* map if does not exist */
982 if (!cores_info
->regs
[coreidx
]) {
983 cores_info
->regs
[coreidx
] = REG_MAP(cores_info
->coresba
[coreidx
],
985 ASSERT(GOODREGS(cores_info
->regs
[coreidx
]));
987 r
= (volatile uint32
*)((volatile uchar
*)cores_info
->regs
[coreidx
] + regoff
);
988 } else if (BUSTYPE(sih
->bustype
) == PCI_BUS
) {
989 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
991 if ((cores_info
->coreid
[coreidx
] == CC_CORE_ID
) && SI_FAST(sii
)) {
992 /* Chipc registers are mapped at 12KB */
995 r
= (volatile uint32
*)((volatile char *)sii
->curmap
+
996 PCI_16KB0_CCREGS_OFFSET
+ regoff
);
997 } else if (sii
->pub
.buscoreidx
== coreidx
) {
998 /* pci registers are at either in the last 2KB of an 8KB window
999 * or, in pcie and pci rev 13 at 8KB
1003 r
= (volatile uint32
*)((volatile char *)sii
->curmap
+
1004 PCI_16KB0_PCIREGS_OFFSET
+ regoff
);
1006 r
= (volatile uint32
*)((volatile char *)sii
->curmap
+
1007 ((regoff
>= SBCONFIGOFF
) ?
1008 PCI_BAR0_PCISBR_OFFSET
: PCI_BAR0_PCIREGS_OFFSET
) +
1014 INTR_OFF(sii
, intr_val
);
1016 /* save current core index */
1017 origidx
= si_coreidx(&sii
->pub
);
1020 r
= (volatile uint32
*) ((volatile uchar
*) ai_setcoreidx(&sii
->pub
, coreidx
) +
1027 w
= (R_REG(sii
->osh
, r
) & ~mask
) | val
;
1028 W_REG(sii
->osh
, r
, w
);
1032 /* restore core index */
1033 if (origidx
!= coreidx
)
1034 ai_setcoreidx(&sii
->pub
, origidx
);
1036 INTR_RESTORE(sii
, intr_val
);
1043 * If there is no need for fiddling with interrupts or core switches (typically silicon
1044 * back plane registers, pci registers and chipcommon registers), this function
1045 * returns the register offset on this core to a mapped address. This address can
1046 * be used for W_REG/R_REG directly.
1048 * For accessing registers that would need a core switch, this function will return
1052 ai_corereg_addr(si_t
*sih
, uint coreidx
, uint regoff
)
1054 volatile uint32
*r
= NULL
;
1056 si_info_t
*sii
= SI_INFO(sih
);
1057 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
1059 ASSERT(GOODIDX(coreidx
));
1060 ASSERT(regoff
< SI_CORE_SIZE
);
1062 if (coreidx
>= SI_MAXCORES
)
1065 if (BUSTYPE(sih
->bustype
) == SI_BUS
) {
1066 /* If internal bus, we can always get at everything */
1068 /* map if does not exist */
1069 if (!cores_info
->regs
[coreidx
]) {
1070 cores_info
->regs
[coreidx
] = REG_MAP(cores_info
->coresba
[coreidx
],
1072 ASSERT(GOODREGS(cores_info
->regs
[coreidx
]));
1074 r
= (volatile uint32
*)((volatile uchar
*)cores_info
->regs
[coreidx
] + regoff
);
1075 } else if (BUSTYPE(sih
->bustype
) == PCI_BUS
) {
1076 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
1078 if ((cores_info
->coreid
[coreidx
] == CC_CORE_ID
) && SI_FAST(sii
)) {
1079 /* Chipc registers are mapped at 12KB */
1082 r
= (volatile uint32
*)((volatile char *)sii
->curmap
+
1083 PCI_16KB0_CCREGS_OFFSET
+ regoff
);
1084 } else if (sii
->pub
.buscoreidx
== coreidx
) {
1085 /* pci registers are at either in the last 2KB of an 8KB window
1086 * or, in pcie and pci rev 13 at 8KB
1090 r
= (volatile uint32
*)((volatile char *)sii
->curmap
+
1091 PCI_16KB0_PCIREGS_OFFSET
+ regoff
);
1093 r
= (volatile uint32
*)((volatile char *)sii
->curmap
+
1094 ((regoff
>= SBCONFIGOFF
) ?
1095 PCI_BAR0_PCISBR_OFFSET
: PCI_BAR0_PCIREGS_OFFSET
) +
1101 ASSERT(sii
->curidx
== coreidx
);
1102 r
= (volatile uint32
*) ((volatile uchar
*)sii
->curmap
+ regoff
);
1109 ai_core_disable(si_t
*sih
, uint32 bits
)
1111 si_info_t
*sii
= SI_INFO(sih
);
1112 volatile uint32 dummy
;
1116 ASSERT(GOODREGS(sii
->curwrap
));
1119 /* if core is already in reset, just return */
1120 if (R_REG(sii
->osh
, &ai
->resetctrl
) & AIRC_RESET
) {
1124 /* ensure there are no pending backplane operations */
1125 SPINWAIT(((status
= R_REG(sii
->osh
, &ai
->resetstatus
)) != 0), 300);
1127 /* if pending backplane ops still, try waiting longer */
1129 /* 300usecs was sufficient to allow backplane ops to clear for big hammer */
1130 /* during driver load we may need more time */
1131 SPINWAIT(((status
= R_REG(sii
->osh
, &ai
->resetstatus
)) != 0), 10000);
1132 /* if still pending ops, continue on and try disable anyway */
1133 /* this is in big hammer path, so don't call wl_reinit in this case... */
1136 W_REG(sii
->osh
, &ai
->resetctrl
, AIRC_RESET
);
1137 dummy
= R_REG(sii
->osh
, &ai
->resetctrl
);
1138 BCM_REFERENCE(dummy
);
1141 W_REG(sii
->osh
, &ai
->ioctrl
, bits
);
1142 dummy
= R_REG(sii
->osh
, &ai
->ioctrl
);
1143 BCM_REFERENCE(dummy
);
1147 /* reset and re-enable a core
1149 * bits - core specific bits that are set during and after reset sequence
1150 * resetbits - core specific bits that are set only during reset sequence
1153 _ai_core_reset(si_t
*sih
, uint32 bits
, uint32 resetbits
)
1155 si_info_t
*sii
= SI_INFO(sih
);
1156 #if defined(UCM_CORRUPTION_WAR)
1157 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
1160 volatile uint32 dummy
;
1161 uint loop_counter
= 10;
1163 ASSERT(GOODREGS(sii
->curwrap
));
1166 /* ensure there are no pending backplane operations */
1167 SPINWAIT(((dummy
= R_REG(sii
->osh
, &ai
->resetstatus
)) != 0), 300);
1169 /* put core into reset state */
1170 W_REG(sii
->osh
, &ai
->resetctrl
, AIRC_RESET
);
1173 /* ensure there are no pending backplane operations */
1174 SPINWAIT((R_REG(sii
->osh
, &ai
->resetstatus
) != 0), 300);
1176 W_REG(sii
->osh
, &ai
->ioctrl
, (bits
| resetbits
| SICF_FGC
| SICF_CLOCK_EN
));
1177 dummy
= R_REG(sii
->osh
, &ai
->ioctrl
);
1178 BCM_REFERENCE(dummy
);
1179 #ifdef UCM_CORRUPTION_WAR
1180 if (cores_info
->coreid
[sii
->curidx
] == D11_CORE_ID
) {
1183 W_REG(sii
->osh
, &ai
->ioctrl
, (dummy
& (~SICF_FGC
)));
1185 #endif /* UCM_CORRUPTION_WAR */
1186 /* ensure there are no pending backplane operations */
1187 SPINWAIT(((dummy
= R_REG(sii
->osh
, &ai
->resetstatus
)) != 0), 300);
1189 while (R_REG(sii
->osh
, &ai
->resetctrl
) != 0 && --loop_counter
!= 0) {
1190 /* ensure there are no pending backplane operations */
1191 SPINWAIT(((dummy
= R_REG(sii
->osh
, &ai
->resetstatus
)) != 0), 300);
1193 /* take core out of reset */
1194 W_REG(sii
->osh
, &ai
->resetctrl
, 0);
1196 /* ensure there are no pending backplane operations */
1197 SPINWAIT((R_REG(sii
->osh
, &ai
->resetstatus
) != 0), 300);
1200 #ifdef UCM_CORRUPTION_WAR
1201 /* Pulse FGC after lifting Reset */
1202 W_REG(sii
->osh
, &ai
->ioctrl
, (bits
| SICF_FGC
| SICF_CLOCK_EN
));
1204 W_REG(sii
->osh
, &ai
->ioctrl
, (bits
| SICF_CLOCK_EN
));
1205 #endif /* UCM_CORRUPTION_WAR */
1206 dummy
= R_REG(sii
->osh
, &ai
->ioctrl
);
1207 BCM_REFERENCE(dummy
);
1208 #ifdef UCM_CORRUPTION_WAR
1209 if (cores_info
->coreid
[sii
->curidx
] == D11_CORE_ID
) {
1212 W_REG(sii
->osh
, &ai
->ioctrl
, (dummy
& (~SICF_FGC
)));
1214 #endif /* UCM_CORRUPTION_WAR */
1220 ai_core_reset(si_t
*sih
, uint32 bits
, uint32 resetbits
)
1222 si_info_t
*sii
= SI_INFO(sih
);
1223 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
1224 uint idx
= sii
->curidx
;
1226 if (cores_info
->wrapba3
[idx
] != 0) {
1227 ai_setcoreidx_3rdwrap(sih
, idx
);
1228 _ai_core_reset(sih
, bits
, resetbits
);
1229 ai_setcoreidx(sih
, idx
);
1232 if (cores_info
->wrapba2
[idx
] != 0) {
1233 ai_setcoreidx_2ndwrap(sih
, idx
);
1234 _ai_core_reset(sih
, bits
, resetbits
);
1235 ai_setcoreidx(sih
, idx
);
1238 _ai_core_reset(sih
, bits
, resetbits
);
1242 ai_core_cflags_wo(si_t
*sih
, uint32 mask
, uint32 val
)
1244 si_info_t
*sii
= SI_INFO(sih
);
1248 if (BCM4707_DMP()) {
1249 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1254 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
1259 ASSERT(GOODREGS(sii
->curwrap
));
1262 ASSERT((val
& ~mask
) == 0);
1265 w
= ((R_REG(sii
->osh
, &ai
->ioctrl
) & ~mask
) | val
);
1266 W_REG(sii
->osh
, &ai
->ioctrl
, w
);
1271 ai_core_cflags(si_t
*sih
, uint32 mask
, uint32 val
)
1273 si_info_t
*sii
= SI_INFO(sih
);
1277 if (BCM4707_DMP()) {
1278 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1284 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
1288 ASSERT(GOODREGS(sii
->curwrap
));
1291 ASSERT((val
& ~mask
) == 0);
1294 w
= ((R_REG(sii
->osh
, &ai
->ioctrl
) & ~mask
) | val
);
1295 W_REG(sii
->osh
, &ai
->ioctrl
, w
);
1298 return R_REG(sii
->osh
, &ai
->ioctrl
);
1302 ai_core_sflags(si_t
*sih
, uint32 mask
, uint32 val
)
1304 si_info_t
*sii
= SI_INFO(sih
);
1308 if (BCM4707_DMP()) {
1309 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1314 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
1319 ASSERT(GOODREGS(sii
->curwrap
));
1322 ASSERT((val
& ~mask
) == 0);
1323 ASSERT((mask
& ~SISF_CORE_BITS
) == 0);
1326 w
= ((R_REG(sii
->osh
, &ai
->iostatus
) & ~mask
) | val
);
1327 W_REG(sii
->osh
, &ai
->iostatus
, w
);
1330 return R_REG(sii
->osh
, &ai
->iostatus
);
#if defined(BCMDBG_PHYDUMP)
/* print interesting aidmp registers */
/*
 * Walk every AXI wrapper and dump its DMP register set into strbuf 'b'.
 * On PCI hosts the BAR0 core2 window is re-pointed at each wrapper base and
 * restored on exit.
 *
 * NOTE(review): reconstructed from a whitespace-mangled chunk; declarations,
 * braces, the else-arm and the core_rev argument were restored from the
 * visible structure - verify against upstream aiutils.c.
 */
void
ai_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
	si_info_t *sii = SI_INFO(sih);
	osl_t *osh = sii->osh;
	aidmp_t *ai;
	uint i;
	uint32 prev_value = 0;
	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
	uint32 cfg_reg = 0;
	uint bar0_win_offset = 0;

	/* Save and restore wrapper access window */
	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		if (PCIE_GEN2(sii)) {
			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
			bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
		} else {
			cfg_reg = PCI_BAR0_WIN2;
			bar0_win_offset = PCI_BAR0_WIN2_OFFSET;
		}

		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);

		if (prev_value == ID32_INVALID) {
			/* config-space read failed: bail without touching the window */
			SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
			return;
		}
	}

	bcm_bprintf(b, "ChipNum:%x, ChipRev;%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
		sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor);

	for (i = 0; i < sii->axi_num_wrappers; i++) {
		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
			/* Set BAR0 window to bridge wapper base address */
			OSL_PCI_WRITE_CONFIG(osh,
				cfg_reg, 4, axi_wrapper[i].wrapper_addr);
			ai = (aidmp_t *) ((volatile uint8*)sii->curmap + bar0_win_offset);
		} else {
			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
		}

		bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x \n", axi_wrapper[i].cid,
			axi_wrapper[i].rev,
			axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER",
			axi_wrapper[i].wrapper_addr);

		/* 4707's chipcommonb wrapper is not accessible - skip it */
		if (BCM4707_CHIP(CHIPID(sih->chip)) &&
			(axi_wrapper[i].cid == NS_CCB_CORE_ID)) {
			bcm_bprintf(b, "Skipping chipcommonb in 4707\n");
			continue;
		}

		bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x "
			"ioctrlwidth 0x%x iostatuswidth 0x%x\n"
			"resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
			"errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x "
			"errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
			"errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
			"intstatus 0x%x config 0x%x itcr 0x%x\n\n",
			R_REG(osh, &ai->ioctrlset),
			R_REG(osh, &ai->ioctrlclear),
			R_REG(osh, &ai->ioctrl),
			R_REG(osh, &ai->iostatus),
			R_REG(osh, &ai->ioctrlwidth),
			R_REG(osh, &ai->iostatuswidth),
			R_REG(osh, &ai->resetctrl),
			R_REG(osh, &ai->resetstatus),
			R_REG(osh, &ai->resetreadid),
			R_REG(osh, &ai->resetwriteid),
			R_REG(osh, &ai->errlogctrl),
			R_REG(osh, &ai->errlogdone),
			R_REG(osh, &ai->errlogstatus),
			R_REG(osh, &ai->errlogaddrlo),
			R_REG(osh, &ai->errlogaddrhi),
			R_REG(osh, &ai->errlogid),
			R_REG(osh, &ai->errloguser),
			R_REG(osh, &ai->errlogflags),
			R_REG(osh, &ai->intstatus),
			R_REG(osh, &ai->config),
			R_REG(osh, &ai->itcr));
	}

	/* Restore the initial wrapper space */
	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		if (prev_value && cfg_reg) {
			OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
		}
	}
}
#endif /* BCMDBG_PHYDUMP */
1434 ai_update_backplane_timeouts(si_t
*sih
, bool enable
, uint32 timeout_exp
, uint32 cid
)
1436 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1437 si_info_t
*sii
= SI_INFO(sih
);
1440 axi_wrapper_t
* axi_wrapper
= sii
->axi_wrapper
;
1441 uint32 errlogctrl
= (enable
<< AIELC_TO_ENAB_SHIFT
) |
1442 ((timeout_exp
<< AIELC_TO_EXP_SHIFT
) & AIELC_TO_EXP_MASK
);
1444 #ifdef BCM_BACKPLANE_TIMEOUT
1445 uint32 prev_value
= 0;
1446 osl_t
*osh
= sii
->osh
;
1449 #endif /* BCM_BACKPLANE_TIMEOUT */
1451 if ((sii
->axi_num_wrappers
== 0) ||
1452 #ifdef BCM_BACKPLANE_TIMEOUT
1454 #endif /* BCM_BACKPLANE_TIMEOUT */
1456 SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
1457 __FUNCTION__
, sii
->axi_num_wrappers
, PCIE(sii
),
1458 BUSTYPE(sii
->pub
.bustype
), sii
->pub
.buscoretype
));
1462 #ifdef BCM_BACKPLANE_TIMEOUT
1463 /* Save and restore the wrapper access window */
1464 if (BUSTYPE(sii
->pub
.bustype
) == PCI_BUS
) {
1465 if (PCIE_GEN1(sii
)) {
1466 cfg_reg
= PCI_BAR0_WIN2
;
1467 offset
= PCI_BAR0_WIN2_OFFSET
;
1468 } else if (PCIE_GEN2(sii
)) {
1469 cfg_reg
= PCIE2_BAR0_CORE2_WIN2
;
1470 offset
= PCIE2_BAR0_CORE2_WIN2_OFFSET
;
1473 ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
1476 prev_value
= OSL_PCI_READ_CONFIG(osh
, cfg_reg
, 4);
1477 if (prev_value
== ID32_INVALID
) {
1478 SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__
, prev_value
));
1482 #endif /* BCM_BACKPLANE_TIMEOUT */
1484 for (i
= 0; i
< sii
->axi_num_wrappers
; ++i
) {
1486 if (axi_wrapper
[i
].wrapper_type
!= AI_SLAVE_WRAPPER
) {
1487 SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n",
1490 axi_wrapper
[i
].wrapper_addr
));
1494 /* Update only given core if requested */
1495 if ((cid
!= 0) && (axi_wrapper
[i
].cid
!= cid
)) {
1499 #ifdef BCM_BACKPLANE_TIMEOUT
1500 if (BUSTYPE(sii
->pub
.bustype
) == PCI_BUS
) {
1501 /* Set BAR0_CORE2_WIN2 to bridge wapper base address */
1502 OSL_PCI_WRITE_CONFIG(osh
,
1503 cfg_reg
, 4, axi_wrapper
[i
].wrapper_addr
);
1505 /* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
1506 ai
= (aidmp_t
*) (DISCARD_QUAL(sii
->curmap
, uint8
) + offset
);
1509 #endif /* BCM_BACKPLANE_TIMEOUT */
1511 ai
= (aidmp_t
*)(uintptr
) axi_wrapper
[i
].wrapper_addr
;
1514 W_REG(sii
->osh
, &ai
->errlogctrl
, errlogctrl
);
1516 SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n",
1519 axi_wrapper
[i
].wrapper_addr
,
1520 R_REG(sii
->osh
, &ai
->errlogctrl
)));
1523 #ifdef BCM_BACKPLANE_TIMEOUT
1524 /* Restore the initial wrapper space */
1526 OSL_PCI_WRITE_CONFIG(osh
, cfg_reg
, 4, prev_value
);
1528 #endif /* BCM_BACKPLANE_TIMEOUT */
1530 #endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)

/* slave error is ignored, so account for those cases */
static uint32 si_ignore_errlog_cnt = 0;

/*
 * Return TRUE when a logged AXI error is a known-benign access that may be
 * dropped (e.g. a BT-originated access to the chipcommon SPROM shadow at
 * 0x190), identified by per-chip AXI master id, error status kind and
 * address range.  On BCMPCIE_BTLOG chips an ignorable timeout also resets
 * the AXI timeout state via ai_reset_axi_to().
 *
 * NOTE(review): reconstructed from a whitespace-mangled chunk; break
 * statements, default case and the #else arm of the BCMPCIE_BTLOG condition
 * were restored from the visible structure - verify against upstream
 * aiutils.c.
 */
static bool
ai_ignore_errlog(si_info_t *sii, aidmp_t *ai,
	uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
{
	uint32 axi_id;
#ifdef BCMPCIE_BTLOG
	uint32 axi_id2 = BCM4347_UNUSED_AXI_ID;
#endif /* BCMPCIE_BTLOG */
	uint32 ignore_errsts = AIELS_SLAVE_ERR;
	uint32 ignore_hi = BT_CC_SPROM_BADREG_HI;
	uint32 ignore_lo = BT_CC_SPROM_BADREG_LO;
	uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE;

	/* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
	switch (CHIPID(sii->pub.chip)) {
		case BCM4350_CHIP_ID:
			axi_id = BCM4350_BT_AXI_ID;
			break;
		case BCM4345_CHIP_ID:
			axi_id = BCM4345_BT_AXI_ID;
			break;
		case BCM4349_CHIP_GRPID:
			axi_id = BCM4349_BT_AXI_ID;
			break;
		case BCM4364_CHIP_ID:
		case BCM4373_CHIP_ID:
			axi_id = BCM4364_BT_AXI_ID;
			break;
#ifdef BCMPCIE_BTLOG
		case BCM4347_CHIP_ID:
		case BCM4357_CHIP_ID:
			/* on 4347/4357 ignorable errors are timeouts to the BT window */
			axi_id = BCM4347_CC_AXI_ID;
			axi_id2 = BCM4347_PCIE_AXI_ID;
			ignore_errsts = AIELS_TIMEOUT;
			ignore_hi = BCM4347_BT_ADDR_HI;
			ignore_lo = BCM4347_BT_ADDR_LO;
			ignore_size = BCM4347_BT_SIZE;
			break;
#endif /* BCMPCIE_BTLOG */

		default:
			/* no known-benign master on this chip */
			return FALSE;
	}

	/* AXI master id check */
	err_axi_id &= AI_ERRLOGID_AXI_ID_MASK;
	if (!(err_axi_id == axi_id ||
#ifdef BCMPCIE_BTLOG
		(axi_id2 != BCM4347_UNUSED_AXI_ID && err_axi_id == axi_id2)))
#else
		FALSE))
#endif /* BCMPCIE_BTLOG */
	{
		return FALSE;
	}

	/* only the expected error kind is ignorable */
	if ((errsts & AIELS_TIMEOUT_MASK) != ignore_errsts)
		return FALSE;

	/* address range check */
	if ((hi_addr != ignore_hi) ||
		(lo_addr < ignore_lo) || (lo_addr >= (ignore_lo + ignore_size)))
		return FALSE;

#ifdef BCMPCIE_BTLOG
	if (ignore_errsts == AIELS_TIMEOUT) {
		/* reset AXI timeout */
		ai_reset_axi_to(sii, ai);
	}
#endif /* BCMPCIE_BTLOG */

	return TRUE;
}
#endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */
1612 #ifdef BCM_BACKPLANE_TIMEOUT
1614 /* Function to return the APB bridge details corresponding to the core */
1616 ai_get_apb_bridge(si_t
* sih
, uint32 coreidx
, uint32
*apb_id
, uint32
* apb_coreuinit
)
1619 uint32 core_base
, core_end
;
1620 si_info_t
*sii
= SI_INFO(sih
);
1621 static uint32 coreidx_cached
= 0, apb_id_cached
= 0, apb_coreunit_cached
= 0;
1622 uint32 tmp_coreunit
= 0;
1623 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
1625 if (coreidx
>= MIN(sii
->numcores
, SI_MAXCORES
))
1628 /* Most of the time apb bridge query will be for d11 core.
1629 * Maintain the last cache and return if found rather than iterating the table
1631 if (coreidx_cached
== coreidx
) {
1632 *apb_id
= apb_id_cached
;
1633 *apb_coreuinit
= apb_coreunit_cached
;
1637 core_base
= cores_info
->coresba
[coreidx
];
1638 core_end
= core_base
+ cores_info
->coresba_size
[coreidx
];
1640 for (i
= 0; i
< sii
->numcores
; i
++) {
1641 if (cores_info
->coreid
[i
] == APB_BRIDGE_ID
) {
1645 apb_base
= cores_info
->coresba
[i
];
1646 apb_end
= apb_base
+ cores_info
->coresba_size
[i
];
1648 if ((core_base
>= apb_base
) &&
1649 (core_end
<= apb_end
)) {
1650 /* Current core is attached to this APB bridge */
1651 *apb_id
= apb_id_cached
= APB_BRIDGE_ID
;
1652 *apb_coreuinit
= apb_coreunit_cached
= tmp_coreunit
;
1653 coreidx_cached
= coreidx
;
1656 /* Increment the coreunit */
1665 ai_clear_backplane_to_fast(si_t
*sih
, void *addr
)
1667 si_info_t
*sii
= SI_INFO(sih
);
1668 volatile void *curmap
= sii
->curmap
;
1669 bool core_reg
= FALSE
;
1671 /* Use fast path only for core register access */
1672 if (((uintptr
)addr
>= (uintptr
)curmap
) &&
1673 ((uintptr
)addr
< ((uintptr
)curmap
+ SI_CORE_SIZE
))) {
1674 /* address being accessed is within current core reg map */
1679 uint32 apb_id
, apb_coreuinit
;
1681 if (ai_get_apb_bridge(sih
, si_coreidx(&sii
->pub
),
1682 &apb_id
, &apb_coreuinit
) == TRUE
) {
1683 /* Found the APB bridge corresponding to current core,
1684 * Check for bus errors in APB wrapper
1686 return ai_clear_backplane_to_per_core(sih
,
1687 apb_id
, apb_coreuinit
, NULL
);
1691 /* Default is to poll for errors on all slave wrappers */
1692 return si_clear_backplane_to(sih
);
1694 #endif /* BCM_BACKPLANE_TIMEOUT */
1696 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
/* Set TRUE by ai_reset_axi_to() when a wrapper fails to leave reset;
 * presumably suppresses further backplane error logging - consumers are
 * outside this chunk, confirm.
 */
1697 static bool g_disable_backplane_logs
= FALSE
;
/* Snapshot of the most recent AXI error recorded by
 * ai_clear_backplane_to_per_core(): AXI_WRAP_STS_* code, offending core id
 * and wrapper address.
 */
1700 static uint32 last_axi_error
= AXI_WRAP_STS_NONE
;
1701 static uint32 last_axi_error_core
= 0;
1702 static uint32 last_axi_error_wrap
= 0;
1706 * API to clear the back plane timeout per core.
1707 * Caller may passs optional wrapper address. If present this will be used as
1708 * the wrapper base address. If wrapper base address is provided then caller
1709 * must provide the coreid also.
1710 * If both coreid and wrapper is zero, then err status of current bridge
1714 ai_clear_backplane_to_per_core(si_t
*sih
, uint coreid
, uint coreunit
, void *wrap
)
1716 int ret
= AXI_WRAP_STS_NONE
;
1718 uint32 errlog_status
= 0;
1719 si_info_t
*sii
= SI_INFO(sih
);
1720 uint32 errlog_lo
= 0, errlog_hi
= 0, errlog_id
= 0, errlog_flags
= 0;
1721 uint32 current_coreidx
= si_coreidx(sih
);
1722 uint32 target_coreidx
= si_findcoreidx(sih
, coreid
, coreunit
);
1724 #if defined(BCM_BACKPLANE_TIMEOUT)
1725 si_axi_error_t
* axi_error
= sih
->err_info
?
1726 &sih
->err_info
->axi_error
[sih
->err_info
->count
] : NULL
;
1727 #endif /* BCM_BACKPLANE_TIMEOUT */
1728 bool restore_core
= FALSE
;
1730 if ((sii
->axi_num_wrappers
== 0) ||
1731 #ifdef BCM_BACKPLANE_TIMEOUT
1733 #endif /* BCM_BACKPLANE_TIMEOUT */
1735 SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
1736 __FUNCTION__
, sii
->axi_num_wrappers
, PCIE(sii
),
1737 BUSTYPE(sii
->pub
.bustype
), sii
->pub
.buscoretype
));
1738 return AXI_WRAP_STS_NONE
;
1742 ai
= (aidmp_t
*)wrap
;
1743 } else if (coreid
&& (target_coreidx
!= current_coreidx
)) {
1745 if (ai_setcoreidx(sih
, target_coreidx
) == NULL
) {
1746 /* Unable to set the core */
1747 SI_PRINT(("Set Code Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
1748 coreid
, coreunit
, target_coreidx
));
1749 errlog_lo
= target_coreidx
;
1750 ret
= AXI_WRAP_STS_SET_CORE_FAIL
;
1754 restore_core
= TRUE
;
1755 ai
= (aidmp_t
*)si_wrapperregs(sih
);
1757 /* Read error status of current wrapper */
1758 ai
= (aidmp_t
*)si_wrapperregs(sih
);
1760 /* Update CoreID to current Code ID */
1761 coreid
= si_coreid(sih
);
1764 /* read error log status */
1765 errlog_status
= R_REG(sii
->osh
, &ai
->errlogstatus
);
1767 if (errlog_status
== ID32_INVALID
) {
1768 /* Do not try to peek further */
1769 SI_PRINT(("%s, errlogstatus:%x - Slave Wrapper:%x\n",
1770 __FUNCTION__
, errlog_status
, coreid
));
1771 ret
= AXI_WRAP_STS_WRAP_RD_ERR
;
1772 errlog_lo
= (uint32
)(uintptr
)&ai
->errlogstatus
;
1776 if ((errlog_status
& AIELS_TIMEOUT_MASK
) != 0) {
1779 /* set ErrDone to clear the condition */
1780 W_REG(sii
->osh
, &ai
->errlogdone
, AIELD_ERRDONE_MASK
);
1782 /* SPINWAIT on errlogstatus timeout status bits */
1783 while ((tmp
= R_REG(sii
->osh
, &ai
->errlogstatus
)) & AIELS_TIMEOUT_MASK
) {
1785 if (tmp
== ID32_INVALID
) {
1786 SI_PRINT(("%s: prev errlogstatus:%x, errlogstatus:%x\n",
1787 __FUNCTION__
, errlog_status
, tmp
));
1788 ret
= AXI_WRAP_STS_WRAP_RD_ERR
;
1789 errlog_lo
= (uint32
)(uintptr
)&ai
->errlogstatus
;
1793 * Clear again, to avoid getting stuck in the loop, if a new error
1794 * is logged after we cleared the first timeout
1796 W_REG(sii
->osh
, &ai
->errlogdone
, AIELD_ERRDONE_MASK
);
1800 if ((10 * count
) > AI_REG_READ_TIMEOUT
) {
1801 errlog_status
= tmp
;
1806 errlog_lo
= R_REG(sii
->osh
, &ai
->errlogaddrlo
);
1807 errlog_hi
= R_REG(sii
->osh
, &ai
->errlogaddrhi
);
1808 errlog_id
= R_REG(sii
->osh
, &ai
->errlogid
);
1809 errlog_flags
= R_REG(sii
->osh
, &ai
->errlogflags
);
1811 /* we are already in the error path, so OK to check for the slave error */
1812 if (ai_ignore_errlog(sii
, ai
, errlog_lo
, errlog_hi
, errlog_id
,
1814 si_ignore_errlog_cnt
++;
1818 /* only reset APB Bridge on timeout (not slave error, or dec error) */
1819 switch (errlog_status
& AIELS_TIMEOUT_MASK
) {
1820 case AIELS_SLAVE_ERR
:
1821 SI_PRINT(("AXI slave error\n"));
1822 ret
= AXI_WRAP_STS_SLAVE_ERR
;
1826 ai_reset_axi_to(sii
, ai
);
1827 ret
= AXI_WRAP_STS_TIMEOUT
;
1831 SI_PRINT(("AXI decode error\n"));
1832 ret
= AXI_WRAP_STS_DECODE_ERR
;
1835 ASSERT(0); /* should be impossible */
1838 SI_PRINT(("\tCoreID: %x\n", coreid
));
1839 SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
1840 ", status 0x%08x\n",
1841 errlog_lo
, errlog_hi
, errlog_id
, errlog_flags
,
1847 if (ret
!= AXI_WRAP_STS_NONE
) {
1848 last_axi_error
= ret
;
1849 last_axi_error_core
= coreid
;
1850 last_axi_error_wrap
= (uint32
)ai
;
1854 #if defined(BCM_BACKPLANE_TIMEOUT)
1855 if (axi_error
&& (ret
!= AXI_WRAP_STS_NONE
)) {
1856 axi_error
->error
= ret
;
1857 axi_error
->coreid
= coreid
;
1858 axi_error
->errlog_lo
= errlog_lo
;
1859 axi_error
->errlog_hi
= errlog_hi
;
1860 axi_error
->errlog_id
= errlog_id
;
1861 axi_error
->errlog_flags
= errlog_flags
;
1862 axi_error
->errlog_status
= errlog_status
;
1863 sih
->err_info
->count
++;
1865 if (sih
->err_info
->count
== SI_MAX_ERRLOG_SIZE
) {
1866 sih
->err_info
->count
= SI_MAX_ERRLOG_SIZE
- 1;
1867 SI_PRINT(("AXI Error log overflow\n"));
1870 #endif /* BCM_BACKPLANE_TIMEOUT */
1873 if (ai_setcoreidx(sih
, current_coreidx
) == NULL
) {
1874 /* Unable to set the core */
1875 return ID32_INVALID
;
1882 /* reset AXI timeout */
1884 ai_reset_axi_to(si_info_t
*sii
, aidmp_t
*ai
)
1886 /* reset APB Bridge */
1887 OR_REG(sii
->osh
, &ai
->resetctrl
, AIRC_RESET
);
1889 (void)R_REG(sii
->osh
, &ai
->resetctrl
);
1890 /* clear Reset bit */
1891 AND_REG(sii
->osh
, &ai
->resetctrl
, ~(AIRC_RESET
));
1893 (void)R_REG(sii
->osh
, &ai
->resetctrl
);
1894 SI_PRINT(("AXI timeout\n"));
1895 if (R_REG(sii
->osh
, &ai
->resetctrl
) & AIRC_RESET
) {
1896 SI_PRINT(("reset failed on wrapper %p\n", ai
));
1897 g_disable_backplane_logs
= TRUE
;
1900 #endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
1903 * This API polls all slave wrappers for errors and returns bit map of
1904 * all reported errors.
1905 * return - bit map of
1907 * AXI_WRAP_STS_TIMEOUT
1908 * AXI_WRAP_STS_SLAVE_ERR
1909 * AXI_WRAP_STS_DECODE_ERR
1910 * AXI_WRAP_STS_PCI_RD_ERR
1911 * AXI_WRAP_STS_WRAP_RD_ERR
1912 * AXI_WRAP_STS_SET_CORE_FAIL
1913 * On timeout detection, correspondign bridge will be reset to
1915 * Error reported in each wrapper can be retrieved using the API
1916 * si_get_axi_errlog_info()
1919 ai_clear_backplane_to(si_t
*sih
)
1922 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1924 si_info_t
*sii
= SI_INFO(sih
);
1927 axi_wrapper_t
* axi_wrapper
= sii
->axi_wrapper
;
1929 #ifdef BCM_BACKPLANE_TIMEOUT
1930 uint32 prev_value
= 0;
1931 osl_t
*osh
= sii
->osh
;
1935 if ((sii
->axi_num_wrappers
== 0) || (!PCIE(sii
)))
1937 if (sii
->axi_num_wrappers
== 0)
1940 SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
1941 __FUNCTION__
, sii
->axi_num_wrappers
, PCIE(sii
),
1942 BUSTYPE(sii
->pub
.bustype
), sii
->pub
.buscoretype
));
1943 return AXI_WRAP_STS_NONE
;
1946 #ifdef BCM_BACKPLANE_TIMEOUT
1947 /* Save and restore wrapper access window */
1948 if (BUSTYPE(sii
->pub
.bustype
) == PCI_BUS
) {
1949 if (PCIE_GEN1(sii
)) {
1950 cfg_reg
= PCI_BAR0_WIN2
;
1951 offset
= PCI_BAR0_WIN2_OFFSET
;
1952 } else if (PCIE_GEN2(sii
)) {
1953 cfg_reg
= PCIE2_BAR0_CORE2_WIN2
;
1954 offset
= PCIE2_BAR0_CORE2_WIN2_OFFSET
;
1957 ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
1960 prev_value
= OSL_PCI_READ_CONFIG(osh
, cfg_reg
, 4);
1962 if (prev_value
== ID32_INVALID
) {
1963 si_axi_error_t
* axi_error
=
1965 &sih
->err_info
->axi_error
[sih
->err_info
->count
] :
1968 SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__
, prev_value
));
1970 axi_error
->error
= ret
= AXI_WRAP_STS_PCI_RD_ERR
;
1971 axi_error
->errlog_lo
= cfg_reg
;
1972 sih
->err_info
->count
++;
1974 if (sih
->err_info
->count
== SI_MAX_ERRLOG_SIZE
) {
1975 sih
->err_info
->count
= SI_MAX_ERRLOG_SIZE
- 1;
1976 SI_PRINT(("AXI Error log overflow\n"));
1983 #endif /* BCM_BACKPLANE_TIMEOUT */
1985 for (i
= 0; i
< sii
->axi_num_wrappers
; ++i
) {
1988 if (axi_wrapper
[i
].wrapper_type
!= AI_SLAVE_WRAPPER
) {
1992 #ifdef BCM_BACKPLANE_TIMEOUT
1993 if (BUSTYPE(sii
->pub
.bustype
) == PCI_BUS
) {
1994 /* Set BAR0_CORE2_WIN2 to bridge wapper base address */
1995 OSL_PCI_WRITE_CONFIG(osh
,
1996 cfg_reg
, 4, axi_wrapper
[i
].wrapper_addr
);
1998 /* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
1999 ai
= (aidmp_t
*) (DISCARD_QUAL(sii
->curmap
, uint8
) + offset
);
2002 #endif /* BCM_BACKPLANE_TIMEOUT */
2004 ai
= (aidmp_t
*)(uintptr
) axi_wrapper
[i
].wrapper_addr
;
2007 tmp
= ai_clear_backplane_to_per_core(sih
, axi_wrapper
[i
].cid
, 0,
2008 DISCARD_QUAL(ai
, void));
2013 #ifdef BCM_BACKPLANE_TIMEOUT
2014 /* Restore the initial wrapper space */
2016 OSL_PCI_WRITE_CONFIG(osh
, cfg_reg
, 4, prev_value
);
2018 #endif /* BCM_BACKPLANE_TIMEOUT */
2020 #endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
2026 ai_num_slaveports(si_t
*sih
, uint coreidx
)
2028 si_info_t
*sii
= SI_INFO(sih
);
2029 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
2032 cib
= cores_info
->cib
[coreidx
];
2033 return ((cib
& CIB_NSP_MASK
) >> CIB_NSP_SHIFT
);
#ifdef UART_TRAP_DBG
/* Print the errlog registers of APB bridge 0 to the console. */
void
ai_dump_APB_Bridge_registers(si_t *sih)
{
	aidmp_t *ai;
	si_info_t *sii = SI_INFO(sih);

	ai = (aidmp_t *) sii->br_wrapba[0];
	printf("APB Bridge 0\n");
	printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
		R_REG(sii->osh, &ai->errlogaddrlo),
		R_REG(sii->osh, &ai->errlogaddrhi),
		R_REG(sii->osh, &ai->errlogid),
		R_REG(sii->osh, &ai->errlogflags));
	printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus));
}
#endif /* UART_TRAP_DBG */
2055 ai_force_clocks(si_t
*sih
, uint clock_state
)
2058 si_info_t
*sii
= SI_INFO(sih
);
2059 aidmp_t
*ai
, *ai_sec
= NULL
;
2060 volatile uint32 dummy
;
2062 si_cores_info_t
*cores_info
= (si_cores_info_t
*)sii
->cores_info
;
2064 ASSERT(GOODREGS(sii
->curwrap
));
2066 if (cores_info
->wrapba2
[sii
->curidx
])
2067 ai_sec
= REG_MAP(cores_info
->wrapba2
[sii
->curidx
], SI_CORE_SIZE
);
2069 /* ensure there are no pending backplane operations */
2070 SPINWAIT((R_REG(sii
->osh
, &ai
->resetstatus
) != 0), 300);
2072 if (clock_state
== FORCE_CLK_ON
) {
2073 ioctrl
= R_REG(sii
->osh
, &ai
->ioctrl
);
2074 W_REG(sii
->osh
, &ai
->ioctrl
, (ioctrl
| SICF_FGC
));
2075 dummy
= R_REG(sii
->osh
, &ai
->ioctrl
);
2076 BCM_REFERENCE(dummy
);
2078 ioctrl
= R_REG(sii
->osh
, &ai_sec
->ioctrl
);
2079 W_REG(sii
->osh
, &ai_sec
->ioctrl
, (ioctrl
| SICF_FGC
));
2080 dummy
= R_REG(sii
->osh
, &ai_sec
->ioctrl
);
2081 BCM_REFERENCE(dummy
);
2084 ioctrl
= R_REG(sii
->osh
, &ai
->ioctrl
);
2085 W_REG(sii
->osh
, &ai
->ioctrl
, (ioctrl
& (~SICF_FGC
)));
2086 dummy
= R_REG(sii
->osh
, &ai
->ioctrl
);
2087 BCM_REFERENCE(dummy
);
2089 ioctrl
= R_REG(sii
->osh
, &ai_sec
->ioctrl
);
2090 W_REG(sii
->osh
, &ai_sec
->ioctrl
, (ioctrl
& (~SICF_FGC
)));
2091 dummy
= R_REG(sii
->osh
, &ai_sec
->ioctrl
);
2092 BCM_REFERENCE(dummy
);
2095 /* ensure there are no pending backplane operations */
2096 SPINWAIT((R_REG(sii
->osh
, &ai
->resetstatus
) != 0), 300);