wifi: delete the invalid prints [1/1]
bcmdhd.100.10.315.x/aiutils.c (GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git)
1 /*
2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
4 *
5 * Copyright (C) 1999-2018, Broadcom.
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 *
26 * <<Broadcom-WL-IPTag/Open:>>
27 *
28 * $Id: aiutils.c 769534 2018-06-26 21:19:11Z $
29 */
30 #include <bcm_cfg.h>
31 #include <typedefs.h>
32 #include <bcmdefs.h>
33 #include <osl.h>
34 #include <bcmutils.h>
35 #include <siutils.h>
36 #include <hndsoc.h>
37 #include <sbchipc.h>
38 #include <pcicfg.h>
39
40 #include "siutils_priv.h"
41 #include <bcmdevs.h>
42
43 #define BCM53573_DMP() (0)
44 #define BCM4707_DMP() (0)
45 #define PMU_DMP() (0)
46 #define GCI_DMP() (0)
47
48 #if defined(BCM_BACKPLANE_TIMEOUT)
49 static bool ai_get_apb_bridge(si_t *sih, uint32 coreidx, uint32 *apb_id, uint32 *apb_coreunit);
50 #endif /* BCM_BACKPLANE_TIMEOUT */
51
52 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
53 static void ai_reset_axi_to(si_info_t *sii, aidmp_t *ai);
54 #endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */
55
56 /* EROM parsing */
57
58 static uint32
59 get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
60 {
61 uint32 ent;
62 uint inv = 0, nom = 0;
63 uint32 size = 0;
64
65 while (TRUE) {
66 ent = R_REG(si_osh(sih), *eromptr);
67 (*eromptr)++;
68
69 if (mask == 0)
70 break;
71
72 if ((ent & ER_VALID) == 0) {
73 inv++;
74 continue;
75 }
76
77 if (ent == (ER_END | ER_VALID))
78 break;
79
80 if ((ent & mask) == match)
81 break;
82
83 /* escape condition: stop after scanning ER_SZ_MAX bytes of EROM without a match, in case it contains invalid values */
84 size += sizeof(*eromptr);
85 if (size >= ER_SZ_MAX) {
86 SI_ERROR(("Failed to find end of EROM marker\n"));
87 break;
88 }
89
90 nom++;
91 }
92
93 SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
94 if (inv + nom) {
95 SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom));
96 }
97 return ent;
98 }
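/* Usage sketch (illustrative, not part of the driver): the EROM walker is
 * driven by (mask, match) pairs, as the call sites below show. A mask of 0
 * reads one raw word, ER_VALID/ER_VALID returns the next valid entry, and
 * ER_TAG/ER_CI seeks the next component descriptor.
 *
 *	cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);	// next CIA
 *	cib = get_erom_ent(sih, &eromptr, 0, 0);		// raw next word
 *	mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);	// next valid entry
 */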
99
100 static uint32
101 get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
102 uint32 *sizel, uint32 *sizeh)
103 {
104 uint32 asd, sz, szd;
105
106 BCM_REFERENCE(ad);
107
108 asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
109 if (((asd & ER_TAG1) != ER_ADD) ||
110 (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
111 ((asd & AD_ST_MASK) != st)) {
112 /* This is not what we want, "push" it back */
113 (*eromptr)--;
114 return 0;
115 }
116 *addrl = asd & AD_ADDR_MASK;
117 if (asd & AD_AG32)
118 *addrh = get_erom_ent(sih, eromptr, 0, 0);
119 else
120 *addrh = 0;
121 *sizeh = 0;
122 sz = asd & AD_SZ_MASK;
123 if (sz == AD_SZ_SZD) {
124 szd = get_erom_ent(sih, eromptr, 0, 0);
125 *sizel = szd & SD_SZ_MASK;
126 if (szd & SD_SG32)
127 *sizeh = get_erom_ent(sih, eromptr, 0, 0);
128 } else
129 *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
130
131 SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
132 sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
133
134 return asd;
135 }
136
137 /* Parse the enumeration rom to identify all cores
138 * Erom content format can be found in:
139 * http://hwnbu-twiki.broadcom.com/twiki/pub/Mwgroup/ArmDocumentation/SystemDiscovery.pdf
140 */
141 void
142 ai_scan(si_t *sih, void *regs, uint devid)
143 {
144 si_info_t *sii = SI_INFO(sih);
145 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
146 chipcregs_t *cc = (chipcregs_t *)regs;
147 uint32 erombase, *eromptr, *eromlim;
148 axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
149
150 BCM_REFERENCE(devid);
151
152 erombase = R_REG(sii->osh, &cc->eromptr);
153
154 switch (BUSTYPE(sih->bustype)) {
155 case SI_BUS:
156 eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
157 break;
158
159 case PCI_BUS:
160 /* Set wrappers address */
161 sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
162
163 /* Now point the window at the erom */
164 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
165 eromptr = regs;
166 break;
167
168 #ifdef BCMSDIO
169 case SPI_BUS:
170 case SDIO_BUS:
171 eromptr = (uint32 *)(uintptr)erombase;
172 break;
173 #endif /* BCMSDIO */
174
175 case PCMCIA_BUS:
176 default:
177 SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
178 ASSERT(0);
179 return;
180 }
181 eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
182 sii->axi_num_wrappers = 0;
183
184 SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
185 OSL_OBFUSCATE_BUF(regs), erombase,
186 OSL_OBFUSCATE_BUF(eromptr), OSL_OBFUSCATE_BUF(eromlim)));
187 while (eromptr < eromlim) {
188 uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
189 uint32 mpd, asd, addrl, addrh, sizel, sizeh;
190 uint i, j, idx;
191 bool br;
192
193 br = FALSE;
194
195 /* Grok a component */
196 cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
197 if (cia == (ER_END | ER_VALID)) {
198 SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
199 return;
200 }
201
202 cib = get_erom_ent(sih, &eromptr, 0, 0);
203
204 if ((cib & ER_TAG) != ER_CI) {
205 SI_ERROR(("CIA not followed by CIB\n"));
206 goto error;
207 }
208
209 cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
210 mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
211 crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
212 nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
213 nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
214 nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
215 nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
216
217 #ifdef BCMDBG_SI
218 SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
219 "nsw = %d, nmp = %d & nsp = %d\n",
220 mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp));
221 #else
222 BCM_REFERENCE(crev);
223 #endif // endif
224
225 if (BCM4347_CHIP(sih->chip)) {
226 /* 4347 has more entries for the ARM core.
227 * This should apply to all chips, but it crashes on router platforms,
228 * so this is a temporary fix pending further analysis.
229 */
230 if (nsp == 0)
231 continue;
232 } else
233 {
234 /* Include Default slave wrapper for timeout monitoring */
235 if ((nsp == 0) ||
236 #if !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT)
237 ((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
238 #else
239 ((CHIPTYPE(sii->pub.socitype) == SOCI_NAI) &&
240 (mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
241 #endif /* !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT) */
242 FALSE) {
243 continue;
244 }
245 }
246
247 if ((nmw + nsw) == 0) {
248 /* A component which is not a core */
249 if (cid == OOB_ROUTER_CORE_ID) {
250 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
251 &addrl, &addrh, &sizel, &sizeh);
252 if (asd != 0) {
253 sii->oob_router = addrl;
254 }
255 }
256 if (cid != NS_CCB_CORE_ID &&
257 cid != PMU_CORE_ID && cid != GCI_CORE_ID && cid != SR_CORE_ID &&
258 cid != HUB_CORE_ID)
259 continue;
260 }
261
262 idx = sii->numcores;
263
264 cores_info->cia[idx] = cia;
265 cores_info->cib[idx] = cib;
266 cores_info->coreid[idx] = cid;
267
268 for (i = 0; i < nmp; i++) {
269 mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
270 if ((mpd & ER_TAG) != ER_MP) {
271 SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
272 goto error;
273 }
274 SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
275 (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
276 (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
277 }
278
279 /* First Slave Address Descriptor should be port 0:
280 * the main register space for the core
281 */
282 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
283 if (asd == 0) {
284 do {
285 /* Try again to see if it is a bridge */
286 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
287 &sizel, &sizeh);
288 if (asd != 0)
289 br = TRUE;
290 else {
291 if (br == TRUE) {
292 break;
293 }
294 else if ((addrh != 0) || (sizeh != 0) ||
295 (sizel != SI_CORE_SIZE)) {
296 SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t sizel = "
297 "0x%x\n", addrh, sizeh, sizel));
298 SI_ERROR(("First Slave ASD for "
299 "core 0x%04x malformed "
300 "(0x%08x)\n", cid, asd));
301 goto error;
302 }
303 }
304 } while (1);
305 }
306 cores_info->coresba[idx] = addrl;
307 cores_info->coresba_size[idx] = sizel;
308 /* Get any more ASDs in first port */
309 j = 1;
310 do {
311 asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
312 &sizel, &sizeh);
313 if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
314 cores_info->coresba2[idx] = addrl;
315 cores_info->coresba2_size[idx] = sizel;
316 }
317 j++;
318 } while (asd != 0);
319
320 /* Go through the ASDs for other slave ports */
321 for (i = 1; i < nsp; i++) {
322 j = 0;
323 do {
324 asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
325 &sizel, &sizeh);
326 /* To get the first base address of second slave port */
327 if ((asd != 0) && (i == 1) && (j == 0)) {
328 cores_info->csp2ba[idx] = addrl;
329 cores_info->csp2ba_size[idx] = sizel;
330 }
331 if (asd == 0)
332 break;
333 j++;
334 } while (1);
335 if (j == 0) {
336 SI_ERROR((" SP %d has no address descriptors\n", i));
337 goto error;
338 }
339 }
340
341 /* Now get master wrappers */
342 for (i = 0; i < nmw; i++) {
343 asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
344 &sizel, &sizeh);
345 if (asd == 0) {
346 SI_ERROR(("Missing descriptor for MW %d\n", i));
347 goto error;
348 }
349 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
350 SI_ERROR(("Master wrapper %d is not 4KB\n", i));
351 goto error;
352 }
353 if (i == 0) {
354 cores_info->wrapba[idx] = addrl;
355 } else if (i == 1) {
356 cores_info->wrapba2[idx] = addrl;
357 } else if (i == 2) {
358 cores_info->wrapba3[idx] = addrl;
359 }
360
361 if (axi_wrapper &&
362 (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
363 axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
364 axi_wrapper[sii->axi_num_wrappers].cid = cid;
365 axi_wrapper[sii->axi_num_wrappers].rev = crev;
366 axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER;
367 axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
368 sii->axi_num_wrappers++;
369 SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x,"
370 "rev:%x, addr:%x, size:%x\n",
371 sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
372 }
373 }
374
375 /* And finally slave wrappers */
376 for (i = 0; i < nsw; i++) {
377 uint fwp = (nsp == 1) ? 0 : 1;
378 asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
379 &sizel, &sizeh);
380
381 /* cache APB bridge wrapper address for set/clear timeout */
382 if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) {
383 ASSERT(sii->num_br < SI_MAXBR);
384 sii->br_wrapba[sii->num_br++] = addrl;
385 }
386
387 if (asd == 0) {
388 SI_ERROR(("Missing descriptor for SW %d\n", i));
389 goto error;
390 }
391 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
392 SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
393 goto error;
394 }
395 if ((nmw == 0) && (i == 0)) {
396 cores_info->wrapba[idx] = addrl;
397 } else if ((nmw == 0) && (i == 1)) {
398 cores_info->wrapba2[idx] = addrl;
399 } else if ((nmw == 0) && (i == 2)) {
400 cores_info->wrapba3[idx] = addrl;
401 }
402
403 /* Include all slave wrappers in the list to
404 * enable and monitor watchdog timeouts
405 */
406
407 if (axi_wrapper &&
408 (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
409 axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
410 axi_wrapper[sii->axi_num_wrappers].cid = cid;
411 axi_wrapper[sii->axi_num_wrappers].rev = crev;
412 axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER;
413
414 /* Software WAR, as discussed with the hardware team, to ensure the proper
415 * slave wrapper base address is set for the 4364 chip ID.
416 * The EROM reports 0x1810c000; correct it to 0x1810e000.
417 * This ensures the AXI default slave wrapper is registered along with the
418 * other slave wrapper cores, which is useful when generating trap info
419 * after a write to an invalid core / wrapper register.
420 */
421
422 if ((CHIPID(sih->chip) == BCM4364_CHIP_ID) &&
423 (cid == DEF_AI_COMP)) {
424 axi_wrapper[sii->axi_num_wrappers].wrapper_addr =
425 0x1810e000;
426 } else {
427 axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
428 }
429
430 sii->axi_num_wrappers++;
431
432 SI_VMSG(("SLAVE WRAPPER: %d, mfg:%x, cid:%x,"
433 "rev:%x, addr:%x, size:%x\n",
434 sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
435 }
436 }
437
438 #ifndef BCM_BACKPLANE_TIMEOUT
439 /* Don't record bridges */
440 if (br)
441 continue;
442 #endif // endif
443
444 /* Done with core */
445 sii->numcores++;
446 }
447
448 SI_ERROR(("Reached end of erom without finding END\n"));
449
450 error:
451 sii->numcores = 0;
452 return;
453 }
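/* Call sketch (illustrative): ai_scan() is normally run once at attach time,
 * after the chipcommon window has been mapped; the exact attach path is
 * bus-specific and only assumed here. On failure sii->numcores is left at 0.
 *
 *	regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);	// chipcommon/EROM window
 *	ai_scan(sih, regs, devid);
 *	if (SI_INFO(sih)->numcores == 0)
 *		return NULL;				// enumeration failed
 */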
454
455 #define AI_SETCOREIDX_MAPSIZE(coreid) \
456 (((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)
457
458 /* This function changes the logical "focus" to the indicated core.
459 * Return the current core's virtual address.
460 */
461 static volatile void *
462 _ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrapn)
463 {
464 si_info_t *sii = SI_INFO(sih);
465 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
466 uint32 addr, wrap, wrap2, wrap3;
467 volatile void *regs;
468
469 if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
470 return (NULL);
471
472 addr = cores_info->coresba[coreidx];
473 wrap = cores_info->wrapba[coreidx];
474 wrap2 = cores_info->wrapba2[coreidx];
475 wrap3 = cores_info->wrapba3[coreidx];
476
477 #ifdef BCM_BACKPLANE_TIMEOUT
478 /* No need to disable interrupts while entering/exiting APB bridge core */
479 if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) &&
480 (cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID))
481 #endif /* BCM_BACKPLANE_TIMEOUT */
482 {
483 /*
484 * If the user has provided an interrupt mask enabled function,
485 * then assert interrupts are disabled before switching the core.
486 */
487 ASSERT((sii->intrsenabled_fn == NULL) ||
488 !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
489 }
490
491 switch (BUSTYPE(sih->bustype)) {
492 case SI_BUS:
493 /* map new one */
494 if (!cores_info->regs[coreidx]) {
495 cores_info->regs[coreidx] = REG_MAP(addr,
496 AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
497 ASSERT(GOODREGS(cores_info->regs[coreidx]));
498 }
499 sii->curmap = regs = cores_info->regs[coreidx];
500 if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
501 cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
502 ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
503 }
504 if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) {
505 cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
506 ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
507 }
508 if (!cores_info->wrappers3[coreidx] && (wrap3 != 0)) {
509 cores_info->wrappers3[coreidx] = REG_MAP(wrap3, SI_CORE_SIZE);
510 ASSERT(GOODREGS(cores_info->wrappers3[coreidx]));
511 }
512
513 if (use_wrapn == 2) {
514 sii->curwrap = cores_info->wrappers3[coreidx];
515 } else if (use_wrapn == 1) {
516 sii->curwrap = cores_info->wrappers2[coreidx];
517 } else {
518 sii->curwrap = cores_info->wrappers[coreidx];
519 }
520 break;
521
522 case PCI_BUS:
523 #ifdef BCM_BACKPLANE_TIMEOUT
524 /* No need to set the BAR0 window if the core is the APB bridge.
525 * This avoids two PCI config writes while checking for errlog.
526 */
527 if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID)
528 #endif /* BCM_BACKPLANE_TIMEOUT */
529 {
530 /* point bar0 window */
531 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
532 }
533
534 regs = sii->curmap;
535 /* point bar0 2nd 4KB window to the primary wrapper */
536 if (use_wrapn)
537 wrap = wrap2;
538 if (PCIE_GEN2(sii))
539 OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
540 else
541 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
542 break;
543
544 #ifdef BCMSDIO
545 case SPI_BUS:
546 case SDIO_BUS:
547 sii->curmap = regs = (void *)((uintptr)addr);
548 if (use_wrapn)
549 sii->curwrap = (void *)((uintptr)wrap2);
550 else
551 sii->curwrap = (void *)((uintptr)wrap);
552 break;
553 #endif /* BCMSDIO */
554
555 case PCMCIA_BUS:
556 default:
557 ASSERT(0);
558 regs = NULL;
559 break;
560 }
561
562 sii->curmap = regs;
563 sii->curidx = coreidx;
564
565 return regs;
566 }
567
568 volatile void *
569 ai_setcoreidx(si_t *sih, uint coreidx)
570 {
571 return _ai_setcoreidx(sih, coreidx, 0);
572 }
573
574 volatile void *
575 ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx)
576 {
577 return _ai_setcoreidx(sih, coreidx, 1);
578 }
579
580 volatile void *
581 ai_setcoreidx_3rdwrap(si_t *sih, uint coreidx)
582 {
583 return _ai_setcoreidx(sih, coreidx, 2);
584 }
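/* Switch/restore sketch (illustrative; this mirrors what callers such as
 * ai_corereg() below actually do):
 *
 *	uint origidx = si_coreidx(sih);			// remember current focus
 *	volatile void *regs = ai_setcoreidx(sih, coreidx);
 *	if (regs != NULL) {
 *		... access the target core through regs ...
 *	}
 *	ai_setcoreidx(sih, origidx);			// restore previous focus
 */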
585
586 void
587 ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
588 {
589 si_info_t *sii = SI_INFO(sih);
590 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
591 chipcregs_t *cc = NULL;
592 uint32 erombase, *eromptr, *eromlim;
593 uint i, j, cidx;
594 uint32 cia, cib, nmp, nsp;
595 uint32 asd, addrl, addrh, sizel, sizeh;
596
597 for (i = 0; i < sii->numcores; i++) {
598 if (cores_info->coreid[i] == CC_CORE_ID) {
599 cc = (chipcregs_t *)cores_info->regs[i];
600 break;
601 }
602 }
603 if (cc == NULL)
604 goto error;
605
606 erombase = R_REG(sii->osh, &cc->eromptr);
607 eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
608 eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
609
610 cidx = sii->curidx;
611 cia = cores_info->cia[cidx];
612 cib = cores_info->cib[cidx];
613
614 nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
615 nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
616
617 /* scan for cores */
618 while (eromptr < eromlim) {
619 if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
620 (get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
621 break;
622 }
623 }
624
625 /* skip master ports */
626 for (i = 0; i < nmp; i++)
627 get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
628
629 /* Skip ASDs in port 0 */
630 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
631 if (asd == 0) {
632 /* Try again to see if it is a bridge */
633 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
634 &sizel, &sizeh);
635 }
636
637 j = 1;
638 do {
639 asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
640 &sizel, &sizeh);
641 j++;
642 } while (asd != 0);
643
644 /* Go through the ASDs for other slave ports */
645 for (i = 1; i < nsp; i++) {
646 j = 0;
647 do {
648 asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
649 &sizel, &sizeh);
650 if (asd == 0)
651 break;
652
653 if (!asidx--) {
654 *addr = addrl;
655 *size = sizel;
656 return;
657 }
658 j++;
659 } while (1);
660
661 if (j == 0) {
662 SI_ERROR((" SP %d has no address descriptors\n", i));
663 break;
664 }
665 }
666
667 error:
668 *size = 0;
669 return;
670 }
671
672 /* Return the number of address spaces in current core */
673 int
674 ai_numaddrspaces(si_t *sih)
675 {
676
677 BCM_REFERENCE(sih);
678
679 return 2;
680 }
681
682 /* Return the address of the nth address space in the current core
683 * Arguments:
684 * sih : Pointer to struct si_t
685 * spidx : slave port index
686 * baidx : base address index
687 */
688 uint32
689 ai_addrspace(si_t *sih, uint spidx, uint baidx)
690 {
691 si_info_t *sii = SI_INFO(sih);
692 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
693 uint cidx;
694
695 cidx = sii->curidx;
696
697 if (spidx == CORE_SLAVE_PORT_0) {
698 if (baidx == CORE_BASE_ADDR_0)
699 return cores_info->coresba[cidx];
700 else if (baidx == CORE_BASE_ADDR_1)
701 return cores_info->coresba2[cidx];
702 }
703 else if (spidx == CORE_SLAVE_PORT_1) {
704 if (baidx == CORE_BASE_ADDR_0)
705 return cores_info->csp2ba[cidx];
706 }
707
708 SI_ERROR(("%s: Need to parse the erom again to find %d base addr in %d slave port\n",
709 __FUNCTION__, baidx, spidx));
710
711 return 0;
712
713 }
714
715 /* Return the size of the nth address space in the current core
716 * Arguments:
717 * sih : Pointer to struct si_t
718 * spidx : slave port index
719 * baidx : base address index
720 */
721 uint32
722 ai_addrspacesize(si_t *sih, uint spidx, uint baidx)
723 {
724 si_info_t *sii = SI_INFO(sih);
725 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
726 uint cidx;
727
728 cidx = sii->curidx;
729 if (spidx == CORE_SLAVE_PORT_0) {
730 if (baidx == CORE_BASE_ADDR_0)
731 return cores_info->coresba_size[cidx];
732 else if (baidx == CORE_BASE_ADDR_1)
733 return cores_info->coresba2_size[cidx];
734 }
735 else if (spidx == CORE_SLAVE_PORT_1) {
736 if (baidx == CORE_BASE_ADDR_0)
737 return cores_info->csp2ba_size[cidx];
738 }
739
740 SI_ERROR(("%s: Need to parse the erom again to find %d base addr in %d slave port\n",
741 __FUNCTION__, baidx, spidx));
742
743 return 0;
744 }
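/* Query sketch (illustrative): fetch the base and size of slave port 0,
 * base address 0 of the currently focused core. Both calls return 0 when
 * the (spidx, baidx) pair was not captured during ai_scan().
 *
 *	uint32 base = ai_addrspace(sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
 *	uint32 size = ai_addrspacesize(sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
 */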
745
746 uint
747 ai_flag(si_t *sih)
748 {
749 si_info_t *sii = SI_INFO(sih);
750 aidmp_t *ai;
751
752 if (BCM4707_DMP()) {
753 SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
754 __FUNCTION__));
755 return sii->curidx;
756 }
757 if (BCM53573_DMP()) {
758 SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__));
759 return sii->curidx;
760 }
761 if (PMU_DMP()) {
762 uint idx, flag;
763 idx = sii->curidx;
764 ai_setcoreidx(sih, SI_CC_IDX);
765 flag = ai_flag_alt(sih);
766 ai_setcoreidx(sih, idx);
767 return flag;
768 }
769
770 ai = sii->curwrap;
771 ASSERT(ai != NULL);
772
773 return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
774 }
775
776 uint
777 ai_flag_alt(si_t *sih)
778 {
779 si_info_t *sii = SI_INFO(sih);
780 aidmp_t *ai;
781
782 if (BCM4707_DMP()) {
783 SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
784 __FUNCTION__));
785 return sii->curidx;
786 }
787
788 ai = sii->curwrap;
789
790 return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
791 }
792
793 void
794 ai_setint(si_t *sih, int siflag)
795 {
796 BCM_REFERENCE(sih);
797 BCM_REFERENCE(siflag);
798
799 }
800
801 uint
802 ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
803 {
804 si_info_t *sii = SI_INFO(sih);
805 uint32 *addr = (uint32 *) ((uchar *)(sii->curwrap) + offset);
806
807 if (mask || val) {
808 uint32 w = R_REG(sii->osh, addr);
809 w &= ~mask;
810 w |= val;
811 W_REG(sii->osh, addr, w);
812 }
813 return (R_REG(sii->osh, addr));
814 }
815
816 uint
817 ai_corevendor(si_t *sih)
818 {
819 si_info_t *sii = SI_INFO(sih);
820 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
821 uint32 cia;
822
823 cia = cores_info->cia[sii->curidx];
824 return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
825 }
826
827 uint
828 ai_corerev(si_t *sih)
829 {
830 si_info_t *sii = SI_INFO(sih);
831 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
832 uint32 cib;
833
834 cib = cores_info->cib[sii->curidx];
835 return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
836 }
837
838 uint
839 ai_corerev_minor(si_t *sih)
840 {
841 return (ai_core_sflags(sih, 0, 0) >> SISF_MINORREV_D11_SHIFT) &
842 SISF_MINORREV_D11_MASK;
843 }
844
845 bool
846 ai_iscoreup(si_t *sih)
847 {
848 si_info_t *sii = SI_INFO(sih);
849 aidmp_t *ai;
850
851 ai = sii->curwrap;
852
853 return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
854 ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
855 }
856
857 /*
858 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
859 * switch back to the original core, and return the new value.
860 *
861 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
862 *
863 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
864 * and (on newer pci cores) chipcommon registers.
865 */
866 uint
867 ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
868 {
869 uint origidx = 0;
870 volatile uint32 *r = NULL;
871 uint w;
872 uint intr_val = 0;
873 bool fast = FALSE;
874 si_info_t *sii = SI_INFO(sih);
875 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
876
877 ASSERT(GOODIDX(coreidx));
878 ASSERT(regoff < SI_CORE_SIZE);
879 ASSERT((val & ~mask) == 0);
880
881 if (coreidx >= SI_MAXCORES)
882 return 0;
883
884 if (BUSTYPE(sih->bustype) == SI_BUS) {
885 /* If internal bus, we can always get at everything */
886 fast = TRUE;
887 /* map if does not exist */
888 if (!cores_info->regs[coreidx]) {
889 cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
890 SI_CORE_SIZE);
891 ASSERT(GOODREGS(cores_info->regs[coreidx]));
892 }
893 r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
894 } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
895 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
896
897 if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
898 /* Chipc registers are mapped at 12KB */
899
900 fast = TRUE;
901 r = (volatile uint32 *)((volatile char *)sii->curmap +
902 PCI_16KB0_CCREGS_OFFSET + regoff);
903 } else if (sii->pub.buscoreidx == coreidx) {
904 /* pci registers are either in the last 2KB of an 8KB window
905 * or, in pcie and pci rev 13 cores, at 8KB
906 */
907 fast = TRUE;
908 if (SI_FAST(sii))
909 r = (volatile uint32 *)((volatile char *)sii->curmap +
910 PCI_16KB0_PCIREGS_OFFSET + regoff);
911 else
912 r = (volatile uint32 *)((volatile char *)sii->curmap +
913 ((regoff >= SBCONFIGOFF) ?
914 PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
915 regoff);
916 }
917 }
918
919 if (!fast) {
920 INTR_OFF(sii, intr_val);
921
922 /* save current core index */
923 origidx = si_coreidx(&sii->pub);
924
925 /* switch core */
926 r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
927 regoff);
928 }
929 ASSERT(r != NULL);
930
931 /* mask and set */
932 if (mask || val) {
933 w = (R_REG(sii->osh, r) & ~mask) | val;
934 W_REG(sii->osh, r, w);
935 }
936
937 /* readback */
938 w = R_REG(sii->osh, r);
939
940 if (!fast) {
941 /* restore core index */
942 if (origidx != coreidx)
943 ai_setcoreidx(&sii->pub, origidx);
944
945 INTR_RESTORE(sii, intr_val);
946 }
947
948 return (w);
949 }
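/* Read-modify-write sketch (illustrative): update only the masked bits of a
 * register at offset regoff in core idx; the contract asserts
 * (val & ~mask) == 0. With mask == val == 0 the call is a pure read.
 *
 *	w = ai_corereg(sih, idx, regoff, 0x3, 0x1);	// returns the readback
 *	cur = ai_corereg(sih, idx, regoff, 0, 0);	// read only
 */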
950
951 /*
952 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
953 * switch back to the original core, and return the value written (unlike
954 * ai_corereg(), no readback of the register is performed).
955 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
956 *
957 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
958 * and (on newer pci cores) chipcommon registers.
959 */
960 uint
961 ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
962 {
963 uint origidx = 0;
964 volatile uint32 *r = NULL;
965 uint w = 0;
966 uint intr_val = 0;
967 bool fast = FALSE;
968 si_info_t *sii = SI_INFO(sih);
969 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
970
971 ASSERT(GOODIDX(coreidx));
972 ASSERT(regoff < SI_CORE_SIZE);
973 ASSERT((val & ~mask) == 0);
974
975 if (coreidx >= SI_MAXCORES)
976 return 0;
977
978 if (BUSTYPE(sih->bustype) == SI_BUS) {
979 /* If internal bus, we can always get at everything */
980 fast = TRUE;
981 /* map if does not exist */
982 if (!cores_info->regs[coreidx]) {
983 cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
984 SI_CORE_SIZE);
985 ASSERT(GOODREGS(cores_info->regs[coreidx]));
986 }
987 r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
988 } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
989 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
990
991 if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
992 /* Chipc registers are mapped at 12KB */
993
994 fast = TRUE;
995 r = (volatile uint32 *)((volatile char *)sii->curmap +
996 PCI_16KB0_CCREGS_OFFSET + regoff);
997 } else if (sii->pub.buscoreidx == coreidx) {
998 /* pci registers are either in the last 2KB of an 8KB window
999 * or, in pcie and pci rev 13 cores, at 8KB
1000 */
1001 fast = TRUE;
1002 if (SI_FAST(sii))
1003 r = (volatile uint32 *)((volatile char *)sii->curmap +
1004 PCI_16KB0_PCIREGS_OFFSET + regoff);
1005 else
1006 r = (volatile uint32 *)((volatile char *)sii->curmap +
1007 ((regoff >= SBCONFIGOFF) ?
1008 PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
1009 regoff);
1010 }
1011 }
1012
1013 if (!fast) {
1014 INTR_OFF(sii, intr_val);
1015
1016 /* save current core index */
1017 origidx = si_coreidx(&sii->pub);
1018
1019 /* switch core */
1020 r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
1021 regoff);
1022 }
1023 ASSERT(r != NULL);
1024
1025 /* mask and set */
1026 if (mask || val) {
1027 w = (R_REG(sii->osh, r) & ~mask) | val;
1028 W_REG(sii->osh, r, w);
1029 }
1030
1031 if (!fast) {
1032 /* restore core index */
1033 if (origidx != coreidx)
1034 ai_setcoreidx(&sii->pub, origidx);
1035
1036 INTR_RESTORE(sii, intr_val);
1037 }
1038
1039 return (w);
1040 }
1041
1042 /*
1043 * If there is no need for fiddling with interrupts or core switches (typically silicon
1044 * back plane registers, pci registers and chipcommon registers), this function
1045 * returns the register offset on this core to a mapped address. This address can
1046 * be used for W_REG/R_REG directly.
1047 *
1048 * For accessing registers that would need a core switch, this function will return
1049 * NULL.
1050 */
1051 volatile uint32 *
1052 ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
1053 {
1054 volatile uint32 *r = NULL;
1055 bool fast = FALSE;
1056 si_info_t *sii = SI_INFO(sih);
1057 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1058
1059 ASSERT(GOODIDX(coreidx));
1060 ASSERT(regoff < SI_CORE_SIZE);
1061
1062 if (coreidx >= SI_MAXCORES)
1063 return 0;
1064
1065 if (BUSTYPE(sih->bustype) == SI_BUS) {
1066 /* If internal bus, we can always get at everything */
1067 fast = TRUE;
1068 /* map if does not exist */
1069 if (!cores_info->regs[coreidx]) {
1070 cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
1071 SI_CORE_SIZE);
1072 ASSERT(GOODREGS(cores_info->regs[coreidx]));
1073 }
1074 r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
1075 } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
1076 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
1077
1078 if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
1079 /* Chipc registers are mapped at 12KB */
1080
1081 fast = TRUE;
1082 r = (volatile uint32 *)((volatile char *)sii->curmap +
1083 PCI_16KB0_CCREGS_OFFSET + regoff);
1084 } else if (sii->pub.buscoreidx == coreidx) {
1085 /* pci registers are either in the last 2KB of an 8KB window
1086 * or, in pcie and pci rev 13 cores, at 8KB
1087 */
1088 fast = TRUE;
1089 if (SI_FAST(sii))
1090 r = (volatile uint32 *)((volatile char *)sii->curmap +
1091 PCI_16KB0_PCIREGS_OFFSET + regoff);
1092 else
1093 r = (volatile uint32 *)((volatile char *)sii->curmap +
1094 ((regoff >= SBCONFIGOFF) ?
1095 PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
1096 regoff);
1097 }
1098 }
1099
1100 if (!fast) {
1101 ASSERT(sii->curidx == coreidx);
1102 r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff);
1103 }
1104
1105 return (r);
1106 }
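/* Fast-path sketch (illustrative): per the contract above, a non-NULL
 * return can be cached and used with W_REG/R_REG directly, avoiding the
 * core-switch overhead of ai_corereg() for every access.
 *
 *	volatile uint32 *r = ai_corereg_addr(sih, idx, regoff);
 *	if (r != NULL)
 *		W_REG(si_osh(sih), r, R_REG(si_osh(sih), r) | 0x1);
 */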
1107
1108 void
1109 ai_core_disable(si_t *sih, uint32 bits)
1110 {
1111 si_info_t *sii = SI_INFO(sih);
1112 volatile uint32 dummy;
1113 uint32 status;
1114 aidmp_t *ai;
1115
1116 ASSERT(GOODREGS(sii->curwrap));
1117 ai = sii->curwrap;
1118
1119 /* if core is already in reset, just return */
1120 if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
1121 return;
1122 }
1123
1124 /* ensure there are no pending backplane operations */
1125 SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
1126
1127 /* if pending backplane ops still, try waiting longer */
1128 if (status != 0) {
1129 /* 300usecs was sufficient to allow backplane ops to clear for big hammer */
1130 /* during driver load we may need more time */
1131 SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
1132 /* if still pending ops, continue on and try disable anyway */
1133 /* this is in big hammer path, so don't call wl_reinit in this case... */
1134 }
1135
1136 W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
1137 dummy = R_REG(sii->osh, &ai->resetctrl);
1138 BCM_REFERENCE(dummy);
1139 OSL_DELAY(1);
1140
1141 W_REG(sii->osh, &ai->ioctrl, bits);
1142 dummy = R_REG(sii->osh, &ai->ioctrl);
1143 BCM_REFERENCE(dummy);
1144 OSL_DELAY(10);
1145 }
1146
1147 /* reset and re-enable a core
1148 * inputs:
1149 * bits - core specific bits that are set during and after reset sequence
1150 * resetbits - core specific bits that are set only during reset sequence
1151 */
1152 static void
1153 _ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
1154 {
1155 si_info_t *sii = SI_INFO(sih);
1156 #if defined(UCM_CORRUPTION_WAR)
1157 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1158 #endif // endif
1159 aidmp_t *ai;
1160 volatile uint32 dummy;
1161 uint loop_counter = 10;
1162
1163 ASSERT(GOODREGS(sii->curwrap));
1164 ai = sii->curwrap;
1165
1166 /* ensure there are no pending backplane operations */
1167 SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
1168
1169 /* put core into reset state */
1170 W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
1171 OSL_DELAY(10);
1172
1173 /* ensure there are no pending backplane operations */
1174 SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
1175
1176 W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
1177 dummy = R_REG(sii->osh, &ai->ioctrl);
1178 BCM_REFERENCE(dummy);
1179 #ifdef UCM_CORRUPTION_WAR
1180 if (cores_info->coreid[sii->curidx] == D11_CORE_ID) {
1181 /* Reset FGC */
1182 OSL_DELAY(1);
1183 W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
1184 }
1185 #endif /* UCM_CORRUPTION_WAR */
1186 /* ensure there are no pending backplane operations */
1187 SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
1188
1189 while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
1190 /* ensure there are no pending backplane operations */
1191 SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
1192
1193 /* take core out of reset */
1194 W_REG(sii->osh, &ai->resetctrl, 0);
1195
1196 /* ensure there are no pending backplane operations */
1197 SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
1198 }
1199
1200 #ifdef UCM_CORRUPTION_WAR
1201 /* Pulse FGC after lifting Reset */
1202 W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
1203 #else
1204 W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
1205 #endif /* UCM_CORRUPTION_WAR */
1206 dummy = R_REG(sii->osh, &ai->ioctrl);
1207 BCM_REFERENCE(dummy);
1208 #ifdef UCM_CORRUPTION_WAR
1209 if (cores_info->coreid[sii->curidx] == D11_CORE_ID) {
1210 /* Reset FGC */
1211 OSL_DELAY(1);
1212 W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
1213 }
1214 #endif /* UCM_CORRUPTION_WAR */
1215 OSL_DELAY(1);
1216
1217 }
1218
1219 void
1220 ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
1221 {
1222 si_info_t *sii = SI_INFO(sih);
1223 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1224 uint idx = sii->curidx;
1225
1226 if (cores_info->wrapba3[idx] != 0) {
1227 ai_setcoreidx_3rdwrap(sih, idx);
1228 _ai_core_reset(sih, bits, resetbits);
1229 ai_setcoreidx(sih, idx);
1230 }
1231
1232 if (cores_info->wrapba2[idx] != 0) {
1233 ai_setcoreidx_2ndwrap(sih, idx);
1234 _ai_core_reset(sih, bits, resetbits);
1235 ai_setcoreidx(sih, idx);
1236 }
1237
1238 _ai_core_reset(sih, bits, resetbits);
1239 }
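/* Reset sketch (illustrative): cycle the focused core through reset.
 * 'bits' persist after the sequence, 'resetbits' are applied only while
 * reset is asserted (see the header comment above _ai_core_reset()).
 *
 *	ai_core_disable(sih, 0);	// optionally force into reset first
 *	ai_core_reset(sih, 0, 0);	// no core-specific control bits
 *	ASSERT(ai_iscoreup(sih));	// clock enabled, reset deasserted
 */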
1240
1241 void
1242 ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
1243 {
1244 si_info_t *sii = SI_INFO(sih);
1245 aidmp_t *ai;
1246 uint32 w;
1247
1248 if (BCM4707_DMP()) {
1249 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1250 __FUNCTION__));
1251 return;
1252 }
1253 if (PMU_DMP()) {
1254 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
1255 __FUNCTION__));
1256 return;
1257 }
1258
1259 ASSERT(GOODREGS(sii->curwrap));
1260 ai = sii->curwrap;
1261
1262 ASSERT((val & ~mask) == 0);
1263
1264 if (mask || val) {
1265 w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
1266 W_REG(sii->osh, &ai->ioctrl, w);
1267 }
1268 }
1269
1270 uint32
1271 ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
1272 {
1273 si_info_t *sii = SI_INFO(sih);
1274 aidmp_t *ai;
1275 uint32 w;
1276
1277 if (BCM4707_DMP()) {
1278 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1279 __FUNCTION__));
1280 return 0;
1281 }
1282
1283 if (PMU_DMP()) {
1284 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
1285 __FUNCTION__));
1286 return 0;
1287 }
1288 ASSERT(GOODREGS(sii->curwrap));
1289 ai = sii->curwrap;
1290
1291 ASSERT((val & ~mask) == 0);
1292
1293 if (mask || val) {
1294 w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
1295 W_REG(sii->osh, &ai->ioctrl, w);
1296 }
1297
1298 return R_REG(sii->osh, &ai->ioctrl);
1299 }
1300
1301 uint32
1302 ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
1303 {
1304 si_info_t *sii = SI_INFO(sih);
1305 aidmp_t *ai;
1306 uint32 w;
1307
1308 if (BCM4707_DMP()) {
1309 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1310 __FUNCTION__));
1311 return 0;
1312 }
1313 if (PMU_DMP()) {
1314 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
1315 __FUNCTION__));
1316 return 0;
1317 }
1318
1319 ASSERT(GOODREGS(sii->curwrap));
1320 ai = sii->curwrap;
1321
1322 ASSERT((val & ~mask) == 0);
1323 ASSERT((mask & ~SISF_CORE_BITS) == 0);
1324
1325 if (mask || val) {
1326 w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
1327 W_REG(sii->osh, &ai->iostatus, w);
1328 }
1329
1330 return R_REG(sii->osh, &ai->iostatus);
1331 }
1332
1333 #if defined(BCMDBG_PHYDUMP)
1334 /* print interesting aidmp registers */
1335 void
1336 ai_dumpregs(si_t *sih, struct bcmstrbuf *b)
1337 {
1338 si_info_t *sii = SI_INFO(sih);
1339 osl_t *osh;
1340 aidmp_t *ai;
1341 uint i;
1342 uint32 prev_value = 0;
1343 axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
1344 uint32 cfg_reg = 0;
1345 uint bar0_win_offset = 0;
1346
1347 osh = sii->osh;
1348
1349 /* Save and restore wrapper access window */
1350 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1351 if (PCIE_GEN2(sii)) {
1352 cfg_reg = PCIE2_BAR0_CORE2_WIN2;
1353 bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
1354 } else {
1355 cfg_reg = PCI_BAR0_WIN2;
1356 bar0_win_offset = PCI_BAR0_WIN2_OFFSET;
1357 }
1358
1359 prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
1360
1361 if (prev_value == ID32_INVALID) {
1362 SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
1363 return;
1364 }
1365 }
1366
1367 bcm_bprintf(b, "ChipNum:%x, ChipRev:%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
1368 sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor);
1369
1370 for (i = 0; i < sii->axi_num_wrappers; i++) {
1371
1372 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1373 /* Set BAR0 window to bridge wrapper base address */
1374 OSL_PCI_WRITE_CONFIG(osh,
1375 cfg_reg, 4, axi_wrapper[i].wrapper_addr);
1376
1377 ai = (aidmp_t *) ((volatile uint8*)sii->curmap + bar0_win_offset);
1378 } else {
1379 ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
1380 }
1381
1382 bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x \n", axi_wrapper[i].cid,
1383 axi_wrapper[i].rev,
1384 axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER",
1385 axi_wrapper[i].wrapper_addr);
1386
1387 /* BCM4707_DMP() */
1388 if (BCM4707_CHIP(CHIPID(sih->chip)) &&
1389 (axi_wrapper[i].cid == NS_CCB_CORE_ID)) {
1390 bcm_bprintf(b, "Skipping chipcommonb in 4707\n");
1391 continue;
1392 }
1393
1394 bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x "
1395 "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
1396 "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
1397 "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x "
1398 "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
1399 "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
1400 "intstatus 0x%x config 0x%x itcr 0x%x\n\n",
1401 R_REG(osh, &ai->ioctrlset),
1402 R_REG(osh, &ai->ioctrlclear),
1403 R_REG(osh, &ai->ioctrl),
1404 R_REG(osh, &ai->iostatus),
1405 R_REG(osh, &ai->ioctrlwidth),
1406 R_REG(osh, &ai->iostatuswidth),
1407 R_REG(osh, &ai->resetctrl),
1408 R_REG(osh, &ai->resetstatus),
1409 R_REG(osh, &ai->resetreadid),
1410 R_REG(osh, &ai->resetwriteid),
1411 R_REG(osh, &ai->errlogctrl),
1412 R_REG(osh, &ai->errlogdone),
1413 R_REG(osh, &ai->errlogstatus),
1414 R_REG(osh, &ai->errlogaddrlo),
1415 R_REG(osh, &ai->errlogaddrhi),
1416 R_REG(osh, &ai->errlogid),
1417 R_REG(osh, &ai->errloguser),
1418 R_REG(osh, &ai->errlogflags),
1419 R_REG(osh, &ai->intstatus),
1420 R_REG(osh, &ai->config),
1421 R_REG(osh, &ai->itcr));
1422 }
1423
1424 /* Restore the initial wrapper space */
1425 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1426 if (prev_value && cfg_reg) {
1427 OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
1428 }
1429 }
1430 }
1431 #endif // endif
1432
1433 void
1434 ai_update_backplane_timeouts(si_t *sih, bool enable, uint32 timeout_exp, uint32 cid)
1435 {
1436 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1437 si_info_t *sii = SI_INFO(sih);
1438 aidmp_t *ai;
1439 uint32 i;
1440 axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
1441 uint32 errlogctrl = (enable << AIELC_TO_ENAB_SHIFT) |
1442 ((timeout_exp << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK);
1443
1444 #ifdef BCM_BACKPLANE_TIMEOUT
1445 uint32 prev_value = 0;
1446 osl_t *osh = sii->osh;
1447 uint32 cfg_reg = 0;
1448 uint32 offset = 0;
1449 #endif /* BCM_BACKPLANE_TIMEOUT */
1450
1451 if ((sii->axi_num_wrappers == 0) ||
1452 #ifdef BCM_BACKPLANE_TIMEOUT
1453 (!PCIE(sii)) ||
1454 #endif /* BCM_BACKPLANE_TIMEOUT */
1455 FALSE) {
1456 SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
1457 __FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
1458 BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
1459 return;
1460 }
1461
1462 #ifdef BCM_BACKPLANE_TIMEOUT
1463 /* Save and restore the wrapper access window */
1464 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1465 if (PCIE_GEN1(sii)) {
1466 cfg_reg = PCI_BAR0_WIN2;
1467 offset = PCI_BAR0_WIN2_OFFSET;
1468 } else if (PCIE_GEN2(sii)) {
1469 cfg_reg = PCIE2_BAR0_CORE2_WIN2;
1470 offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
1471 }
1472 else {
1473 ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
1474 }
1475
1476 prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
1477 if (prev_value == ID32_INVALID) {
1478 SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
1479 return;
1480 }
1481 }
1482 #endif /* BCM_BACKPLANE_TIMEOUT */
1483
1484 for (i = 0; i < sii->axi_num_wrappers; ++i) {
1485
1486 if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
1487 SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n",
1488 axi_wrapper[i].mfg,
1489 axi_wrapper[i].cid,
1490 axi_wrapper[i].wrapper_addr));
1491 continue;
1492 }
1493
1494 /* Update only given core if requested */
1495 if ((cid != 0) && (axi_wrapper[i].cid != cid)) {
1496 continue;
1497 }
1498
1499 #ifdef BCM_BACKPLANE_TIMEOUT
1500 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1501 /* Set BAR0_CORE2_WIN2 to bridge wrapper base address */
1502 OSL_PCI_WRITE_CONFIG(osh,
1503 cfg_reg, 4, axi_wrapper[i].wrapper_addr);
1504
1505 /* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
1506 ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
1507 }
1508 else
1509 #endif /* BCM_BACKPLANE_TIMEOUT */
1510 {
1511 ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
1512 }
1513
1514 W_REG(sii->osh, &ai->errlogctrl, errlogctrl);
1515
1516 SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n",
1517 axi_wrapper[i].mfg,
1518 axi_wrapper[i].cid,
1519 axi_wrapper[i].wrapper_addr,
1520 R_REG(sii->osh, &ai->errlogctrl)));
1521 }
1522
1523 #ifdef BCM_BACKPLANE_TIMEOUT
1524 /* Restore the initial wrapper space */
1525 if (prev_value) {
1526 OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
1527 }
1528 #endif /* BCM_BACKPLANE_TIMEOUT */
1529
1530 #endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
1531 }
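/* Enable sketch (illustrative): the timeout is programmed as a power-of-two
 * exponent via AIELC_TO_EXP_SHIFT, so timeout_exp = 9 requests a 2^9
 * backplane-clock timeout; the exponent chosen by the driver is a policy
 * decision and the value 9 here is only an example. cid == 0 applies the
 * setting to every slave wrapper.
 *
 *	ai_update_backplane_timeouts(sih, TRUE, 9, 0);	// enable on all wrappers
 *	ai_update_backplane_timeouts(sih, FALSE, 0, 0);	// disable again
 */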
1532
1533 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1534
1535 /* slave error is ignored, so account for those cases */
1536 static uint32 si_ignore_errlog_cnt = 0;
1537
1538 static bool
1539 ai_ignore_errlog(si_info_t *sii, aidmp_t *ai,
1540 uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
1541 {
1542 uint32 axi_id;
1543 #ifdef BCMPCIE_BTLOG
1544 uint32 axi_id2 = BCM4347_UNUSED_AXI_ID;
1545 #endif /* BCMPCIE_BTLOG */
1546 uint32 ignore_errsts = AIELS_SLAVE_ERR;
1547 uint32 ignore_hi = BT_CC_SPROM_BADREG_HI;
1548 uint32 ignore_lo = BT_CC_SPROM_BADREG_LO;
1549 uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE;
1550
1551 /* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
1552 switch (CHIPID(sii->pub.chip)) {
1553 case BCM4350_CHIP_ID:
1554 axi_id = BCM4350_BT_AXI_ID;
1555 break;
1556 case BCM4345_CHIP_ID:
1557 axi_id = BCM4345_BT_AXI_ID;
1558 break;
1559 case BCM4349_CHIP_GRPID:
1560 axi_id = BCM4349_BT_AXI_ID;
1561 break;
1562 case BCM4364_CHIP_ID:
1563 case BCM4373_CHIP_ID:
1564 axi_id = BCM4364_BT_AXI_ID;
1565 break;
1566 #ifdef BCMPCIE_BTLOG
1567 case BCM4347_CHIP_ID:
1568 case BCM4357_CHIP_ID:
1569 axi_id = BCM4347_CC_AXI_ID;
1570 axi_id2 = BCM4347_PCIE_AXI_ID;
1571 ignore_errsts = AIELS_TIMEOUT;
1572 ignore_hi = BCM4347_BT_ADDR_HI;
1573 ignore_lo = BCM4347_BT_ADDR_LO;
1574 ignore_size = BCM4347_BT_SIZE;
1575 break;
1576 #endif /* BCMPCIE_BTLOG */
1577
1578 default:
1579 return FALSE;
1580 }
1581
1582 /* AXI ID check */
1583 err_axi_id &= AI_ERRLOGID_AXI_ID_MASK;
1584 if (!(err_axi_id == axi_id ||
1585 #ifdef BCMPCIE_BTLOG
1586 (axi_id2 != BCM4347_UNUSED_AXI_ID && err_axi_id == axi_id2)))
1587 #else
1588 FALSE))
1589 #endif /* BCMPCIE_BTLOG */
1590 return FALSE;
1591
1592 /* slave errors */
1593 if ((errsts & AIELS_TIMEOUT_MASK) != ignore_errsts)
1594 return FALSE;
1595
1596 /* address range check */
1597 if ((hi_addr != ignore_hi) ||
1598 (lo_addr < ignore_lo) || (lo_addr >= (ignore_lo + ignore_size)))
1599 return FALSE;
1600
1601 #ifdef BCMPCIE_BTLOG
1602 if (ignore_errsts == AIELS_TIMEOUT) {
1603 /* reset AXI timeout */
1604 ai_reset_axi_to(sii, ai);
1605 }
1606 #endif /* BCMPCIE_BTLOG */
1607
1608 return TRUE;
1609 }
1610 #endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */
1611
1612 #ifdef BCM_BACKPLANE_TIMEOUT
1613
1614 /* Function to return the APB bridge details corresponding to the core */
1615 static bool
1616 ai_get_apb_bridge(si_t *sih, uint32 coreidx, uint32 *apb_id, uint32 *apb_coreunit)
1617 {
1618 uint i;
1619 uint32 core_base, core_end;
1620 si_info_t *sii = SI_INFO(sih);
1621 static uint32 coreidx_cached = 0, apb_id_cached = 0, apb_coreunit_cached = 0;
1622 uint32 tmp_coreunit = 0;
1623 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1624
1625 if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
1626 return FALSE;
1627
1628 /* Most of the time the APB bridge query will be for the d11 core.
1629 * Cache the last lookup and return it on a hit rather than iterating the table.
1630 */
1631 if (coreidx_cached == coreidx) {
1632 *apb_id = apb_id_cached;
1633 *apb_coreunit = apb_coreunit_cached;
1634 return TRUE;
1635 }
1636
1637 core_base = cores_info->coresba[coreidx];
1638 core_end = core_base + cores_info->coresba_size[coreidx];
1639
1640 for (i = 0; i < sii->numcores; i++) {
1641 if (cores_info->coreid[i] == APB_BRIDGE_ID) {
1642 uint32 apb_base;
1643 uint32 apb_end;
1644
1645 apb_base = cores_info->coresba[i];
1646 apb_end = apb_base + cores_info->coresba_size[i];
1647
1648 if ((core_base >= apb_base) &&
1649 (core_end <= apb_end)) {
1650 /* Current core is attached to this APB bridge */
1651 *apb_id = apb_id_cached = APB_BRIDGE_ID;
1652 *apb_coreunit = apb_coreunit_cached = tmp_coreunit;
1653 coreidx_cached = coreidx;
1654 return TRUE;
1655 }
1656 /* Increment the coreunit */
1657 tmp_coreunit++;
1658 }
1659 }
1660
1661 return FALSE;
1662 }
1663
1664 uint32
1665 ai_clear_backplane_to_fast(si_t *sih, void *addr)
1666 {
1667 si_info_t *sii = SI_INFO(sih);
1668 volatile void *curmap = sii->curmap;
1669 bool core_reg = FALSE;
1670
1671 /* Use fast path only for core register access */
1672 if (((uintptr)addr >= (uintptr)curmap) &&
1673 ((uintptr)addr < ((uintptr)curmap + SI_CORE_SIZE))) {
1674 /* address being accessed is within current core reg map */
1675 core_reg = TRUE;
1676 }
1677
1678 if (core_reg) {
1679 uint32 apb_id, apb_coreunit;
1680
1681 if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub),
1682 &apb_id, &apb_coreunit) == TRUE) {
1683 /* Found the APB bridge corresponding to current core,
1684 * Check for bus errors in APB wrapper
1685 */
1686 return ai_clear_backplane_to_per_core(sih,
1687 apb_id, apb_coreunit, NULL);
1688 }
1689 }
1690
1691 /* Default is to poll for errors on all slave wrappers */
1692 return si_clear_backplane_to(sih);
1693 }
1694 #endif /* BCM_BACKPLANE_TIMEOUT */
1695
1696 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1697 static bool g_disable_backplane_logs = FALSE;
1698
1699 #if defined(ETD)
1700 static uint32 last_axi_error = AXI_WRAP_STS_NONE;
1701 static uint32 last_axi_error_core = 0;
1702 static uint32 last_axi_error_wrap = 0;
1703 #endif /* ETD */
1704
1705 /*
1706 * API to clear the backplane timeout per core.
1707 * The caller may pass an optional wrapper address. If present, it is used as
1708 * the wrapper base address; in that case the caller must also provide the
1709 * coreid.
1710 * If both coreid and wrapper are zero, the error status of the current
1711 * bridge is checked.
1712 */
1713 uint32
1714 ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void *wrap)
1715 {
1716 int ret = AXI_WRAP_STS_NONE;
1717 aidmp_t *ai = NULL;
1718 uint32 errlog_status = 0;
1719 si_info_t *sii = SI_INFO(sih);
1720 uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0;
1721 uint32 current_coreidx = si_coreidx(sih);
1722 uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit);
1723
1724 #if defined(BCM_BACKPLANE_TIMEOUT)
1725 si_axi_error_t * axi_error = sih->err_info ?
1726 &sih->err_info->axi_error[sih->err_info->count] : NULL;
1727 #endif /* BCM_BACKPLANE_TIMEOUT */
1728 bool restore_core = FALSE;
1729
1730 if ((sii->axi_num_wrappers == 0) ||
1731 #ifdef BCM_BACKPLANE_TIMEOUT
1732 (!PCIE(sii)) ||
1733 #endif /* BCM_BACKPLANE_TIMEOUT */
1734 FALSE) {
1735 SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
1736 __FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
1737 BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
1738 return AXI_WRAP_STS_NONE;
1739 }
1740
1741 if (wrap != NULL) {
1742 ai = (aidmp_t *)wrap;
1743 } else if (coreid && (target_coreidx != current_coreidx)) {
1744
1745 if (ai_setcoreidx(sih, target_coreidx) == NULL) {
1746 /* Unable to set the core */
1747 SI_PRINT(("Set Core Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
1748 coreid, coreunit, target_coreidx));
1749 errlog_lo = target_coreidx;
1750 ret = AXI_WRAP_STS_SET_CORE_FAIL;
1751 goto end;
1752 }
1753
1754 restore_core = TRUE;
1755 ai = (aidmp_t *)si_wrapperregs(sih);
1756 } else {
1757 /* Read error status of current wrapper */
1758 ai = (aidmp_t *)si_wrapperregs(sih);
1759
1760 /* Update coreid to the current core's ID */
1761 coreid = si_coreid(sih);
1762 }
1763
1764 /* read error log status */
1765 errlog_status = R_REG(sii->osh, &ai->errlogstatus);
1766
1767 if (errlog_status == ID32_INVALID) {
1768 /* Do not try to peek further */
1769 SI_PRINT(("%s, errlogstatus:%x - Slave Wrapper:%x\n",
1770 __FUNCTION__, errlog_status, coreid));
1771 ret = AXI_WRAP_STS_WRAP_RD_ERR;
1772 errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
1773 goto end;
1774 }
1775
1776 if ((errlog_status & AIELS_TIMEOUT_MASK) != 0) {
1777 uint32 tmp;
1778 uint32 count = 0;
1779 /* set ErrDone to clear the condition */
1780 W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
1781
1782 /* SPINWAIT on errlogstatus timeout status bits */
1783 while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_TIMEOUT_MASK) {
1784
1785 if (tmp == ID32_INVALID) {
1786 SI_PRINT(("%s: prev errlogstatus:%x, errlogstatus:%x\n",
1787 __FUNCTION__, errlog_status, tmp));
1788 ret = AXI_WRAP_STS_WRAP_RD_ERR;
1789 errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
1790 goto end;
1791 }
1792 /*
1793 * Clear again, to avoid getting stuck in the loop, if a new error
1794 * is logged after we cleared the first timeout
1795 */
1796 W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
1797
1798 count++;
1799 OSL_DELAY(10);
1800 if ((10 * count) > AI_REG_READ_TIMEOUT) {
1801 errlog_status = tmp;
1802 break;
1803 }
1804 }
1805
1806 errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
1807 errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
1808 errlog_id = R_REG(sii->osh, &ai->errlogid);
1809 errlog_flags = R_REG(sii->osh, &ai->errlogflags);
1810
1811 /* we are already in the error path, so OK to check for the slave error */
1812 if (ai_ignore_errlog(sii, ai, errlog_lo, errlog_hi, errlog_id,
1813 errlog_status)) {
1814 si_ignore_errlog_cnt++;
1815 goto end;
1816 }
1817
1818 /* only reset APB Bridge on timeout (not slave error, or dec error) */
1819 switch (errlog_status & AIELS_TIMEOUT_MASK) {
1820 case AIELS_SLAVE_ERR:
1821 SI_PRINT(("AXI slave error\n"));
1822 ret = AXI_WRAP_STS_SLAVE_ERR;
1823 break;
1824
1825 case AIELS_TIMEOUT:
1826 ai_reset_axi_to(sii, ai);
1827 ret = AXI_WRAP_STS_TIMEOUT;
1828 break;
1829
1830 case AIELS_DECODE:
1831 SI_PRINT(("AXI decode error\n"));
1832 ret = AXI_WRAP_STS_DECODE_ERR;
1833 break;
1834 default:
1835 ASSERT(0); /* should be impossible */
1836 }
1837
1838 SI_PRINT(("\tCoreID: %x\n", coreid));
1839 SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
1840 ", status 0x%08x\n",
1841 errlog_lo, errlog_hi, errlog_id, errlog_flags,
1842 errlog_status));
1843 }
1844
1845 end:
1846 #if defined(ETD)
1847 if (ret != AXI_WRAP_STS_NONE) {
1848 last_axi_error = ret;
1849 last_axi_error_core = coreid;
1850 last_axi_error_wrap = (uint32)(uintptr)ai;
1851 }
1852 #endif /* ETD */
1853
1854 #if defined(BCM_BACKPLANE_TIMEOUT)
1855 if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
1856 axi_error->error = ret;
1857 axi_error->coreid = coreid;
1858 axi_error->errlog_lo = errlog_lo;
1859 axi_error->errlog_hi = errlog_hi;
1860 axi_error->errlog_id = errlog_id;
1861 axi_error->errlog_flags = errlog_flags;
1862 axi_error->errlog_status = errlog_status;
1863 sih->err_info->count++;
1864
1865 if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
1866 sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
1867 SI_PRINT(("AXI Error log overflow\n"));
1868 }
1869 }
1870 #endif /* BCM_BACKPLANE_TIMEOUT */
1871
1872 if (restore_core) {
1873 if (ai_setcoreidx(sih, current_coreidx) == NULL) {
1874 /* Unable to set the core */
1875 return ID32_INVALID;
1876 }
1877 }
1878
1879 return ret;
1880 }
1881
1882 /* reset AXI timeout */
1883 static void
1884 ai_reset_axi_to(si_info_t *sii, aidmp_t *ai)
1885 {
1886 /* reset APB Bridge */
1887 OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
1888 /* sync write */
1889 (void)R_REG(sii->osh, &ai->resetctrl);
1890 /* clear Reset bit */
1891 AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
1892 /* sync write */
1893 (void)R_REG(sii->osh, &ai->resetctrl);
1894 SI_PRINT(("AXI timeout\n"));
1895 if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
1896 SI_PRINT(("reset failed on wrapper %p\n", ai));
1897 g_disable_backplane_logs = TRUE;
1898 }
1899 }
1900 #endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
1901
1902 /*
1903 * This API polls all slave wrappers for errors and returns a bitmap of
1904 * all reported errors.
1905 * return - bitmap of
1906 * AXI_WRAP_STS_NONE
1907 * AXI_WRAP_STS_TIMEOUT
1908 * AXI_WRAP_STS_SLAVE_ERR
1909 * AXI_WRAP_STS_DECODE_ERR
1910 * AXI_WRAP_STS_PCI_RD_ERR
1911 * AXI_WRAP_STS_WRAP_RD_ERR
1912 * AXI_WRAP_STS_SET_CORE_FAIL
1913 * On timeout detection, the corresponding bridge is reset to
1914 * unblock the bus.
1915 * The error reported by each wrapper can be retrieved using the API
1916 * si_get_axi_errlog_info()
1917 */
1918 uint32
1919 ai_clear_backplane_to(si_t *sih)
1920 {
1921 uint32 ret = 0;
1922 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1923
1924 si_info_t *sii = SI_INFO(sih);
1925 aidmp_t *ai;
1926 uint32 i;
1927 axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
1928
1929 #ifdef BCM_BACKPLANE_TIMEOUT
1930 uint32 prev_value = 0;
1931 osl_t *osh = sii->osh;
1932 uint32 cfg_reg = 0;
1933 uint32 offset = 0;
1934
1935 if ((sii->axi_num_wrappers == 0) || (!PCIE(sii)))
1936 #else
1937 if (sii->axi_num_wrappers == 0)
1938 #endif // endif
1939 {
1940 SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
1941 __FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
1942 BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
1943 return AXI_WRAP_STS_NONE;
1944 }
1945
1946 #ifdef BCM_BACKPLANE_TIMEOUT
1947 /* Save and restore wrapper access window */
1948 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1949 if (PCIE_GEN1(sii)) {
1950 cfg_reg = PCI_BAR0_WIN2;
1951 offset = PCI_BAR0_WIN2_OFFSET;
1952 } else if (PCIE_GEN2(sii)) {
1953 cfg_reg = PCIE2_BAR0_CORE2_WIN2;
1954 offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
1955 }
1956 else {
1957 ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
1958 }
1959
1960 prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
1961
1962 if (prev_value == ID32_INVALID) {
1963 si_axi_error_t * axi_error =
1964 sih->err_info ?
1965 &sih->err_info->axi_error[sih->err_info->count] :
1966 NULL;
1967
1968 SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
1969 if (axi_error) {
1970 axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR;
1971 axi_error->errlog_lo = cfg_reg;
1972 sih->err_info->count++;
1973
1974 if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
1975 sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
1976 SI_PRINT(("AXI Error log overflow\n"));
1977 }
1978 }
1979
1980 return ret;
1981 }
1982 }
1983 #endif /* BCM_BACKPLANE_TIMEOUT */
1984
1985 for (i = 0; i < sii->axi_num_wrappers; ++i) {
1986 uint32 tmp;
1987
1988 if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
1989 continue;
1990 }
1991
1992 #ifdef BCM_BACKPLANE_TIMEOUT
1993 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1994 /* Set BAR0_CORE2_WIN2 to bridge wrapper base address */
1995 OSL_PCI_WRITE_CONFIG(osh,
1996 cfg_reg, 4, axi_wrapper[i].wrapper_addr);
1997
1998 /* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
1999 ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
2000 }
2001 else
2002 #endif /* BCM_BACKPLANE_TIMEOUT */
2003 {
2004 ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
2005 }
2006
2007 tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0,
2008 DISCARD_QUAL(ai, void));
2009
2010 ret |= tmp;
2011 }
2012
2013 #ifdef BCM_BACKPLANE_TIMEOUT
2014 /* Restore the initial wrapper space */
2015 if (prev_value) {
2016 OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
2017 }
2018 #endif /* BCM_BACKPLANE_TIMEOUT */
2019
2020 #endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
2021
2022 return ret;
2023 }
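/* Poll sketch (illustrative): the return value is a bitmap of the
 * AXI_WRAP_STS_* codes listed above, so callers can test conditions
 * individually after a suspicious bus access:
 *
 *	uint32 sts = ai_clear_backplane_to(sih);
 *	if (sts & AXI_WRAP_STS_TIMEOUT)
 *		... a hung transaction was detected and its bridge reset ...
 */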
2024
2025 uint
2026 ai_num_slaveports(si_t *sih, uint coreidx)
2027 {
2028 si_info_t *sii = SI_INFO(sih);
2029 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
2030 uint32 cib;
2031
2032 cib = cores_info->cib[coreidx];
2033 return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT);
2034 }
2035
2036 #ifdef UART_TRAP_DBG
2037 void
2038 ai_dump_APB_Bridge_registers(si_t *sih)
2039 {
2040 aidmp_t *ai;
2041 si_info_t *sii = SI_INFO(sih);
2042
2043 ai = (aidmp_t *) sii->br_wrapba[0];
2044 printf("APB Bridge 0\n");
2045 printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
2046 R_REG(sii->osh, &ai->errlogaddrlo),
2047 R_REG(sii->osh, &ai->errlogaddrhi),
2048 R_REG(sii->osh, &ai->errlogid),
2049 R_REG(sii->osh, &ai->errlogflags));
2050 printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus));
2051 }
2052 #endif /* UART_TRAP_DBG */
2053
2054 void
2055 ai_force_clocks(si_t *sih, uint clock_state)
2056 {
2057
2058 si_info_t *sii = SI_INFO(sih);
2059 aidmp_t *ai, *ai_sec = NULL;
2060 volatile uint32 dummy;
2061 uint32 ioctrl;
2062 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
2063
2064 ASSERT(GOODREGS(sii->curwrap));
2065 ai = sii->curwrap;
2066 if (cores_info->wrapba2[sii->curidx])
2067 ai_sec = REG_MAP(cores_info->wrapba2[sii->curidx], SI_CORE_SIZE);
2068
2069 /* ensure there are no pending backplane operations */
2070 SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
2071
2072 if (clock_state == FORCE_CLK_ON) {
2073 ioctrl = R_REG(sii->osh, &ai->ioctrl);
2074 W_REG(sii->osh, &ai->ioctrl, (ioctrl | SICF_FGC));
2075 dummy = R_REG(sii->osh, &ai->ioctrl);
2076 BCM_REFERENCE(dummy);
2077 if (ai_sec) {
2078 ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
2079 W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl | SICF_FGC));
2080 dummy = R_REG(sii->osh, &ai_sec->ioctrl);
2081 BCM_REFERENCE(dummy);
2082 }
2083 } else {
2084 ioctrl = R_REG(sii->osh, &ai->ioctrl);
2085 W_REG(sii->osh, &ai->ioctrl, (ioctrl & (~SICF_FGC)));
2086 dummy = R_REG(sii->osh, &ai->ioctrl);
2087 BCM_REFERENCE(dummy);
2088 if (ai_sec) {
2089 ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
2090 W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl & (~SICF_FGC)));
2091 dummy = R_REG(sii->osh, &ai_sec->ioctrl);
2092 BCM_REFERENCE(dummy);
2093 }
2094 }
2095 /* ensure there are no pending backplane operations */
2096 SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
2097 }
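/* Clock-force sketch (illustrative; FORCE_CLK_ON is used above, and a
 * matching FORCE_CLK_OFF state is assumed to be defined alongside it):
 *
 *	ai_force_clocks(sih, FORCE_CLK_ON);	// assert SICF_FGC on the wrapper(s)
 *	... access registers that require forced clocks ...
 *	ai_force_clocks(sih, FORCE_CLK_OFF);	// drop the force-gated clocks
 */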