/*
 * wifi: fix reboot panic issue
 * Origin: LineageOS/G12 android_hardware_amlogic_kernel-modules_dhd-driver
 *         bcmdhd.1.363.59.144.x.cn / aiutils.c
 */
1/*
2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
4 *
5 * Copyright (C) 1999-2016, Broadcom Corporation
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 *
26 * <<Broadcom-WL-IPTag/Open:>>
27 *
28 * $Id: aiutils.c 607900 2015-12-22 13:38:53Z $
29 */
30#include <bcm_cfg.h>
31#include <typedefs.h>
32#include <bcmdefs.h>
33#include <osl.h>
34#include <bcmutils.h>
35#include <siutils.h>
36#include <hndsoc.h>
37#include <sbchipc.h>
38#include <pcicfg.h>
39
40#include "siutils_priv.h"
41
/* Chip-specific DMP (wrapper-register) quirk predicates.  All stubbed to 0
 * in this build: none of the affected chips (47162, 5357, 53573, 4707) are
 * supported here, so the quirk branches below compile away.
 */
#define BCM47162_DMP() (0)
#define BCM5357_DMP() (0)
#define BCM53573_DMP() (0)
#define BCM4707_DMP() (0)
#define PMU_DMP() (0)
#define GCI_DMP() (0)
/* Core id/rev remapping hooks; identity mappings in this build. */
#define remap_coreid(sih, coreid) (coreid)
#define remap_corerev(sih, corerev) (corerev)
50
51/* EROM parsing */
52
53static uint32
54get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
55{
56 uint32 ent;
57 uint inv = 0, nom = 0;
58 uint32 size = 0;
59
60 while (TRUE) {
61 ent = R_REG(si_osh(sih), *eromptr);
62 (*eromptr)++;
63
64 if (mask == 0)
65 break;
66
67 if ((ent & ER_VALID) == 0) {
68 inv++;
69 continue;
70 }
71
72 if (ent == (ER_END | ER_VALID))
73 break;
74
75 if ((ent & mask) == match)
76 break;
77
78 /* escape condition related EROM size if it has invalid values */
79 size += sizeof(*eromptr);
80 if (size >= ER_SZ_MAX) {
81 SI_ERROR(("Failed to find end of EROM marker\n"));
82 break;
83 }
84
85 nom++;
86 }
87
88 SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
89 if (inv + nom) {
90 SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom));
91 }
92 return ent;
93}
94
/* Parse one Address Space Descriptor (ASD) from the EROM.
 * sp/ad select the expected slave-port number and descriptor index, st the
 * descriptor type (AD_ST_SLAVE / AD_ST_BRIDGE / AD_ST_MWRAP / AD_ST_SWRAP).
 * On a match: returns the raw descriptor word and fills addrl/addrh and
 * sizel/sizeh.  On a mismatch: rewinds the EROM cursor by one word and
 * returns 0, leaving the output parameters untouched.
 */
static uint32
get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
        uint32 *sizel, uint32 *sizeh)
{
	uint32 asd, sz, szd;

	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
	if (((asd & ER_TAG1) != ER_ADD) ||
	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
	    ((asd & AD_ST_MASK) != st)) {
		/* This is not what we want, "push" it back */
		(*eromptr)--;
		return 0;
	}
	*addrl = asd & AD_ADDR_MASK;
	if (asd & AD_AG32)
		/* 64-bit address: high word follows in the next EROM entry */
		*addrh = get_erom_ent(sih, eromptr, 0, 0);
	else
		*addrh = 0;
	*sizeh = 0;
	sz = asd & AD_SZ_MASK;
	if (sz == AD_SZ_SZD) {
		/* Explicit size descriptor follows the address word(s) */
		szd = get_erom_ent(sih, eromptr, 0, 0);
		*sizel = szd & SD_SZ_MASK;
		if (szd & SD_SG32)
			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
	} else
		/* Otherwise the size is encoded as a power-of-two multiple of AD_SZ_BASE */
		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);

	SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
	        sp, ad, st, *sizeh, *sizel, *addrh, *addrl));

	return asd;
}
129
/* Hook for chip-specific hardware fixups applied after the EROM scan
 * completes; intentionally a no-op in this build.
 */
static void
ai_hwfixup(si_info_t *sii)
{
}
134
135
/* parse the enumeration rom to identify all cores */
/* Walks the EROM pointed to by chipcommon, recording per-core ids, register
 * base addresses and wrapper addresses into cores_info.  On any parse error
 * sii->numcores is reset to 0.  Exits successfully only via the ER_END path.
 */
void
ai_scan(si_t *sih, void *regs, uint devid)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	chipcregs_t *cc = (chipcregs_t *)regs;
	uint32 erombase, *eromptr, *eromlim;

	erombase = R_REG(sii->osh, &cc->eromptr);

	/* Derive a CPU-accessible EROM pointer; how depends on the bus */
	switch (BUSTYPE(sih->bustype)) {
	case SI_BUS:
		eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
		break;

	case PCI_BUS:
		/* Set wrappers address */
		sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);

		/* Now point the window at the erom */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
		eromptr = regs;
		break;

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		/* On SDIO/SPI the backplane address is used directly */
		eromptr = (uint32 *)(uintptr)erombase;
		break;
#endif /* BCMSDIO */

	case PCMCIA_BUS:
	default:
		SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
		ASSERT(0);
		return;
	}
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));

	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
	        regs, erombase, eromptr, eromlim));
	while (eromptr < eromlim) {
		uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
		uint32 mpd, asd, addrl, addrh, sizel, sizeh;
		uint i, j, idx;
		bool br;

		br = FALSE;

		/* Grok a component */
		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
		if (cia == (ER_END | ER_VALID)) {
			/* Normal termination: END marker reached */
			SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
			ai_hwfixup(sii);
			return;
		}

		cib = get_erom_ent(sih, &eromptr, 0, 0);

		if ((cib & ER_TAG) != ER_CI) {
			SI_ERROR(("CIA not followed by CIB\n"));
			goto error;
		}

		/* Decode component id A/B words */
		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;	/* # master wrappers */
		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;	/* # slave wrappers */
		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;	/* # master ports */
		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;	/* # slave ports */

#ifdef BCMDBG_SI
		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
		        "nsw = %d, nmp = %d & nsp = %d\n",
		        mfg, cid, crev, eromptr - 1, nmw, nsw, nmp, nsp));
#else
		BCM_REFERENCE(crev);
#endif

		/* Skip the default ARM component and anything with no slave ports */
		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
			continue;
		if ((nmw + nsw == 0)) {
			/* A component which is not a core */
			if (cid == OOB_ROUTER_CORE_ID) {
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
					&addrl, &addrh, &sizel, &sizeh);
				if (asd != 0) {
					sii->oob_router = addrl;
				}
			}
			/* Only a few wrapper-less components are recorded as cores */
			if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID &&
				cid != PMU_CORE_ID && cid != GCI_CORE_ID)
				continue;
		}

		idx = sii->numcores;

		cores_info->cia[idx] = cia;
		cores_info->cib[idx] = cib;
		cores_info->coreid[idx] = remap_coreid(sih, cid);

		/* Consume (and merely log) the master-port descriptors */
		for (i = 0; i < nmp; i++) {
			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
			if ((mpd & ER_TAG) != ER_MP) {
				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
				goto error;
			}
			SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
			        (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
			        (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
		}

		/* First Slave Address Descriptor should be port 0:
		 * the main register space for the core
		 */
		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
		if (asd == 0) {
			do {
				/* Try again to see if it is a bridge */
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
				              &sizel, &sizeh);
				if (asd != 0)
					br = TRUE;
				else {
					if (br == TRUE) {
						break;
					}
					/* NOTE(review): if neither a slave nor a bridge
					 * descriptor was found, addrl/addrh/sizel/sizeh were
					 * never written by get_asd and are read uninitialized
					 * here — verify against upstream before relying on
					 * this path.
					 */
					else if ((addrh != 0) || (sizeh != 0) ||
						(sizel != SI_CORE_SIZE)) {
						SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t size1 ="
							"0x%x\n", addrh, sizeh, sizel));
						SI_ERROR(("First Slave ASD for"
							"core 0x%04x malformed "
							"(0x%08x)\n", cid, asd));
						goto error;
					}
				}
			} while (1);
		}
		cores_info->coresba[idx] = addrl;
		cores_info->coresba_size[idx] = sizel;
		/* Get any more ASDs in port 0 */
		j = 1;
		do {
			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
			              &sizel, &sizeh);
			/* Record only the second 4KB region as the alternate base */
			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
				cores_info->coresba2[idx] = addrl;
				cores_info->coresba2_size[idx] = sizel;
			}
			j++;
		} while (asd != 0);

		/* Go through the ASDs for other slave ports */
		for (i = 1; i < nsp; i++) {
			j = 0;
			do {
				asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
				              &sizel, &sizeh);

				if (asd == 0)
					break;
				j++;
			} while (1);
			if (j == 0) {
				SI_ERROR((" SP %d has no address descriptors\n", i));
				goto error;
			}
		}

		/* Now get master wrappers */
		for (i = 0; i < nmw; i++) {
			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
			              &sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for MW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
				goto error;
			}
			if (i == 0)
				cores_info->wrapba[idx] = addrl;
			else if (i == 1)
				cores_info->wrapba2[idx] = addrl;
		}

		/* And finally slave wrappers */
		for (i = 0; i < nsw; i++) {
			/* If there were master wrappers, slave wrappers start at port 1 */
			uint fwp = (nsp == 1) ? 0 : 1;
			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
			              &sizel, &sizeh);

			/* cache APB bridge wrapper address for set/clear timeout */
			if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) {
				ASSERT(sii->num_br < SI_MAXBR);
				sii->br_wrapba[sii->num_br++] = addrl;
			}
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for SW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
				goto error;
			}
			/* Slave wrappers only fill the slots master wrappers didn't */
			if ((nmw == 0) && (i == 0))
				cores_info->wrapba[idx] = addrl;
			else if ((nmw == 0) && (i == 1))
				cores_info->wrapba2[idx] = addrl;
		}


		/* Don't record bridges */
		if (br)
			continue;

		/* Done with core */
		sii->numcores++;
	}

	SI_ERROR(("Reached end of erom without finding END\n"));

error:
	/* Any malformed EROM invalidates the whole scan */
	sii->numcores = 0;
	return;
}
366
/* NS_CCB occupies 15 consecutive 4KB regions; everything else maps one. */
#define AI_SETCOREIDX_MAPSIZE(coreid) \
	(((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
static void *
_ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrap2)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 addr, wrap, wrap2;
	void *regs;

	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
		return (NULL);

	addr = cores_info->coresba[coreidx];
	wrap = cores_info->wrapba[coreidx];
	wrap2 = cores_info->wrapba2[coreidx];

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));

	switch (BUSTYPE(sih->bustype)) {
	case SI_BUS:
		/* map new one */
		/* Mappings are created lazily and cached per core index */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(addr,
				AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		sii->curmap = regs = cores_info->regs[coreidx];
		if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
			cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
		}
		if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) {
			cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
		}
		if (use_wrap2)
			sii->curwrap = cores_info->wrappers2[coreidx];
		else
			sii->curwrap = cores_info->wrappers[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		/* The BAR0 window is re-pointed; the virtual mapping is fixed */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
		regs = sii->curmap;
		/* point bar0 2nd 4KB window to the primary wrapper */
		if (use_wrap2)
			wrap = wrap2;
		if (PCIE_GEN2(sii))
			OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
		else
			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
		break;

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		/* Backplane addresses are used directly over SDIO/SPI */
		sii->curmap = regs = (void *)((uintptr)addr);
		if (use_wrap2)
			sii->curwrap = (void *)((uintptr)wrap2);
		else
			sii->curwrap = (void *)((uintptr)wrap);
		break;
#endif /* BCMSDIO */

	case PCMCIA_BUS:
	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	sii->curmap = regs;
	sii->curidx = coreidx;

	return regs;
}
453
454void *
455ai_setcoreidx(si_t *sih, uint coreidx)
456{
457 return _ai_setcoreidx(sih, coreidx, 0);
458}
459
460void *
461ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx)
462{
463 return _ai_setcoreidx(sih, coreidx, 1);
464}
465
/* Look up the asidx'th extra address space of the current core by re-walking
 * the EROM.  On success fills *addr/*size; on failure (or if asidx is out of
 * range) sets *size = 0.
 * NOTE(review): *addr is left unmodified on the failure paths — callers must
 * check *size, not *addr.
 */
void
ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	chipcregs_t *cc = NULL;
	uint32 erombase, *eromptr, *eromlim;
	uint i, j, cidx;
	uint32 cia, cib, nmp, nsp;
	uint32 asd, addrl, addrh, sizel, sizeh;

	/* Find chipcommon to learn where the EROM lives */
	for (i = 0; i < sii->numcores; i++) {
		if (cores_info->coreid[i] == CC_CORE_ID) {
			cc = (chipcregs_t *)cores_info->regs[i];
			break;
		}
	}
	if (cc == NULL)
		goto error;

	erombase = R_REG(sii->osh, &cc->eromptr);
	eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));

	cidx = sii->curidx;
	cia = cores_info->cia[cidx];
	cib = cores_info->cib[cidx];

	nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
	nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

	/* scan for cores */
	/* Re-locate the current core's CIA/CIB pair in the EROM */
	while (eromptr < eromlim) {
		if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
			(get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
			break;
		}
	}

	/* skip master ports */
	for (i = 0; i < nmp; i++)
		get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);

	/* Skip ASDs in port 0 */
	asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
	if (asd == 0) {
		/* Try again to see if it is a bridge */
		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
		              &sizel, &sizeh);
	}

	j = 1;
	do {
		asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
		              &sizel, &sizeh);
		j++;
	} while (asd != 0);

	/* Go through the ASDs for other slave ports */
	for (i = 1; i < nsp; i++) {
		j = 0;
		do {
			asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
				&sizel, &sizeh);
			if (asd == 0)
				break;

			/* Count down across ports until the requested space */
			if (!asidx--) {
				*addr = addrl;
				*size = sizel;
				return;
			}
			j++;
		} while (1);

		if (j == 0) {
			SI_ERROR((" SP %d has no address descriptors\n", i));
			break;
		}
	}

error:
	*size = 0;
	return;
}
551
/* Return the number of address spaces in current core */
/* Only the primary and alternate (coresba/coresba2) spaces are tracked. */
int
ai_numaddrspaces(si_t *sih)
{
	return 2;
}
558
559/* Return the address of the nth address space in the current core */
560uint32
561ai_addrspace(si_t *sih, uint asidx)
562{
563 si_info_t *sii = SI_INFO(sih);
564 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
565 uint cidx;
566
567 cidx = sii->curidx;
568
569 if (asidx == 0)
570 return cores_info->coresba[cidx];
571 else if (asidx == 1)
572 return cores_info->coresba2[cidx];
573 else {
574 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
575 __FUNCTION__, asidx));
576 return 0;
577 }
578}
579
580/* Return the size of the nth address space in the current core */
581uint32
582ai_addrspacesize(si_t *sih, uint asidx)
583{
584 si_info_t *sii = SI_INFO(sih);
585 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
586 uint cidx;
587
588 cidx = sii->curidx;
589
590 if (asidx == 0)
591 return cores_info->coresba_size[cidx];
592 else if (asidx == 1)
593 return cores_info->coresba2_size[cidx];
594 else {
595 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
596 __FUNCTION__, asidx));
597 return 0;
598 }
599}
600
/* Return the backplane (OOB) flag number of the current core, read from the
 * low bits of the wrapper's oobselouta30 register.  Chips whose wrapper
 * registers are unsafe to touch (all stubbed to 0 here) return the core
 * index instead.
 */
uint
ai_flag(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;

	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
		return sii->curidx;
	}
	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
		return sii->curidx;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
			__FUNCTION__));
		return sii->curidx;
	}
	if (BCM53573_DMP()) {
		SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__));
		return sii->curidx;
	}
#ifdef REROUTE_OOBINT
	if (PMU_DMP()) {
		SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
			__FUNCTION__));
		return PMU_OOB_BIT;
	}
#else
	if (PMU_DMP()) {
		/* PMU has no own flag; borrow chipcommon's alternate flag,
		 * restoring the previously focused core afterwards.
		 */
		uint idx, flag;
		idx = sii->curidx;
		ai_setcoreidx(sih, SI_CC_IDX);
		flag = ai_flag_alt(sih);
		ai_setcoreidx(sih, idx);
		return flag;
	}
#endif /* REROUTE_OOBINT */

	ai = sii->curwrap;
	ASSERT(ai != NULL);

	return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
}
646
/* Return the current core's alternate OOB flag number, i.e. the second
 * selector field of the wrapper's oobselouta30 register.
 */
uint
ai_flag_alt(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;

	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
		return sii->curidx;
	}
	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
		return sii->curidx;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
			__FUNCTION__));
		return sii->curidx;
	}
#ifdef REROUTE_OOBINT
	if (PMU_DMP()) {
		SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
			__FUNCTION__));
		return PMU_OOB_BIT;
	}
#endif /* REROUTE_OOBINT */

	ai = sii->curwrap;

	return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
}
678
/* Set the backplane interrupt flag for the current core; not needed (and
 * therefore a no-op) on AI interconnects.
 */
void
ai_setint(si_t *sih, int siflag)
{
}
683
684uint
685ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
686{
687 si_info_t *sii = SI_INFO(sih);
688 uint32 *map = (uint32 *) sii->curwrap;
689
690 if (mask || val) {
691 uint32 w = R_REG(sii->osh, map+(offset/4));
692 w &= ~mask;
693 w |= val;
694 W_REG(sii->osh, map+(offset/4), w);
695 }
696
697 return (R_REG(sii->osh, map+(offset/4)));
698}
699
700uint
701ai_corevendor(si_t *sih)
702{
703 si_info_t *sii = SI_INFO(sih);
704 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
705 uint32 cia;
706
707 cia = cores_info->cia[sii->curidx];
708 return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
709}
710
711uint
712ai_corerev(si_t *sih)
713{
714 si_info_t *sii = SI_INFO(sih);
715 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
716 uint32 cib;
717
718
719 cib = cores_info->cib[sii->curidx];
720 return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
721}
722
723bool
724ai_iscoreup(si_t *sih)
725{
726 si_info_t *sii = SI_INFO(sih);
727 aidmp_t *ai;
728
729 ai = sii->curwrap;
730
731 return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
732 ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
733}
734
/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
uint
ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	uint32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = FALSE;	/* TRUE when no core switch (and no INTR_OFF) is needed */
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;


	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
			                            SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (uint32 *)((char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (uint32 *)((char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast) {
		/* Slow path: disable interrupts, switch focus to the target core */
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		w = (R_REG(sii->osh, r) & ~mask) | val;
		W_REG(sii->osh, r, w);
	}

	/* readback */
	w = R_REG(sii->osh, r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			ai_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return (w);
}
827
/*
 * If there is no need for fiddling with interrupts or core switches (typically silicon
 * back plane registers, pci registers and chipcommon registers), this function
 * returns the register offset on this core to a mapped address. This address can
 * be used for W_REG/R_REG directly.
 *
 * For accessing registers that would need a core switch, this function will return
 * NULL.
 *
 * NOTE(review): the fast-path address computation below duplicates the logic
 * in ai_corereg() — keep the two in sync when changing either.
 */
uint32 *
ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
{
	uint32 *r = NULL;
	bool fast = FALSE;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;


	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
			                            SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (uint32 *)((char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (uint32 *)((char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast) {
		/* No fast path: only valid if the caller already focused this core */
		ASSERT(sii->curidx == coreidx);
		r = (uint32*) ((uchar*)sii->curmap + regoff);
	}

	return (r);
}
893
/* Put the current core into reset, then program its ioctrl with 'bits'.
 * Waits for pending backplane operations to drain first so the reset does
 * not hang outstanding transactions.
 */
void
ai_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii = SI_INFO(sih);
	volatile uint32 dummy;
	uint32 status;
	aidmp_t *ai;


	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* if core is already in reset, just return */
	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
		return;

	/* ensure there are no pending backplane operations */
	SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

	/* if pending backplane ops still, try waiting longer */
	if (status != 0) {
		/* 300usecs was sufficient to allow backplane ops to clear for big hammer */
		/* during driver load we may need more time */
		SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
		/* if still pending ops, continue on and try disable anyway */
		/* this is in big hammer path, so don't call wl_reinit in this case... */
	}

	/* Assert reset; read back to post the write before delaying */
	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	dummy = R_REG(sii->osh, &ai->resetctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	W_REG(sii->osh, &ai->ioctrl, bits);
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);
}
932
/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
static void
_ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	volatile uint32 dummy;
	uint loop_counter = 10;	/* bound the reset-release retry loop */
#ifdef CUSTOMER_HW4_DEBUG
	printf("%s: bits: 0x%x, resetbits: 0x%x\n", __FUNCTION__, bits, resetbits);
#endif

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* ensure there are no pending backplane operations */
	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
#ifdef CUSTOMER_HW4_DEBUG
	printf("%s: resetstatus: %p dummy: %x\n", __FUNCTION__, &ai->resetstatus, dummy);
#endif


	/* put core into reset state */
#ifdef CUSTOMER_HW4_DEBUG
	printf("%s: resetctrl: %p\n", __FUNCTION__, &ai->resetctrl);
#endif
	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	OSL_DELAY(10);

	/* ensure there are no pending backplane operations */
	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);

	/* Enable clocks (with force-gating and the caller's reset-only bits)
	 * while the core is still held in reset.
	 */
	W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);
#ifdef CUSTOMER_HW4_DEBUG
	printf("%s: ioctrl: %p dummy: 0x%x\n", __FUNCTION__, &ai->ioctrl, dummy);
#endif
	BCM_REFERENCE(dummy);

	/* ensure there are no pending backplane operations */
	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);


	/* Release reset; retry up to loop_counter times if it fails to stick */
	while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
		/* ensure there are no pending backplane operations */
		SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);


		/* take core out of reset */
		W_REG(sii->osh, &ai->resetctrl, 0);
#ifdef CUSTOMER_HW4_DEBUG
		printf("%s: loop_counter: %d resetstatus: %p resetctrl: %p\n",
			__FUNCTION__, loop_counter, &ai->resetstatus, &ai->resetctrl);
#endif

		/* ensure there are no pending backplane operations */
		SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
	}


	/* Drop force-gating and reset-only bits; leave the clock enabled */
	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);
#ifdef CUSTOMER_HW4_DEBUG
	printf("%s: ioctl: %p dummy: 0x%x\n", __FUNCTION__, &ai->ioctrl, dummy);
#endif
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
}
1005
1006void
1007ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
1008{
1009 si_info_t *sii = SI_INFO(sih);
1010 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1011 uint idx = sii->curidx;
1012
1013 if (cores_info->wrapba2[idx] != 0) {
1014 ai_setcoreidx_2ndwrap(sih, idx);
1015 _ai_core_reset(sih, bits, resetbits);
1016 ai_setcoreidx(sih, idx);
1017 }
1018
1019 _ai_core_reset(sih, bits, resetbits);
1020}
1021
/* Write-only variant of ai_core_cflags(): read-modify-write the current
 * core's ioctrl register without returning the result.  Unsafe chips (all
 * stubbed to 0 here) are rejected with an error message.
 */
void
ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	uint32 w;


	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
			__FUNCTION__));
		return;
	}
	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
			__FUNCTION__));
		return;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
			__FUNCTION__));
		return;
	}
	if (PMU_DMP()) {
		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
			__FUNCTION__));
		return;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* val must lie within mask */
	ASSERT((val & ~mask) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
		W_REG(sii->osh, &ai->ioctrl, w);
	}
}
1061
/* Read-modify-write the current core's ioctrl (control flags) register and
 * return its resulting value.  mask/val of 0/0 performs a plain read.
 */
uint32
ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	uint32 w;

	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
			__FUNCTION__));
		return 0;
	}
	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
			__FUNCTION__));
		return 0;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
			__FUNCTION__));
		return 0;
	}

	if (PMU_DMP()) {
		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
			__FUNCTION__));
		return 0;
	}
	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* val must lie within mask */
	ASSERT((val & ~mask) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
		W_REG(sii->osh, &ai->ioctrl, w);
	}

	return R_REG(sii->osh, &ai->ioctrl);
}
1102
/* Read-modify-write the current core's iostatus (state flags) register and
 * return its resulting value.  Only bits within SISF_CORE_BITS may be set.
 */
uint32
ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	uint32 w;

	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0",
			__FUNCTION__));
		return 0;
	}
	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n",
			__FUNCTION__));
		return 0;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
			__FUNCTION__));
		return 0;
	}
	if (PMU_DMP()) {
		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
			__FUNCTION__));
		return 0;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* val must lie within mask, and mask within the writable core bits */
	ASSERT((val & ~mask) == 0);
	ASSERT((mask & ~SISF_CORE_BITS) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
		W_REG(sii->osh, &ai->iostatus, w);
	}

	return R_REG(sii->osh, &ai->iostatus);
}
1144
#if defined(BCMDBG_PHYDUMP)
/* print interesting aidmp registers */
/* Iterates every scanned core, focusing each in turn, and dumps its wrapper
 * register block into the string buffer b.  Note: leaves the core focus on
 * the last core iterated.
 */
void
ai_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	osl_t *osh;
	aidmp_t *ai;
	uint i;

	osh = sii->osh;

	for (i = 0; i < sii->numcores; i++) {
		si_setcoreidx(&sii->pub, i);
		ai = sii->curwrap;

		bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);
		/* Skip cores whose wrapper registers are unsafe to read */
		if (BCM47162_DMP()) {
			bcm_bprintf(b, "Skipping mips74k in 47162a0\n");
			continue;
		}
		if (BCM5357_DMP()) {
			bcm_bprintf(b, "Skipping usb20h in 5357\n");
			continue;
		}
		if (BCM4707_DMP()) {
			bcm_bprintf(b, "Skipping chipcommonb in 4707\n");
			continue;
		}

		if (PMU_DMP()) {
			bcm_bprintf(b, "Skipping pmu core\n");
			continue;
		}

		bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x"
			    "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
			    "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
			    "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x"
			    "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
			    "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
			    "intstatus 0x%x config 0x%x itcr 0x%x\n",
			    R_REG(osh, &ai->ioctrlset),
			    R_REG(osh, &ai->ioctrlclear),
			    R_REG(osh, &ai->ioctrl),
			    R_REG(osh, &ai->iostatus),
			    R_REG(osh, &ai->ioctrlwidth),
			    R_REG(osh, &ai->iostatuswidth),
			    R_REG(osh, &ai->resetctrl),
			    R_REG(osh, &ai->resetstatus),
			    R_REG(osh, &ai->resetreadid),
			    R_REG(osh, &ai->resetwriteid),
			    R_REG(osh, &ai->errlogctrl),
			    R_REG(osh, &ai->errlogdone),
			    R_REG(osh, &ai->errlogstatus),
			    R_REG(osh, &ai->errlogaddrlo),
			    R_REG(osh, &ai->errlogaddrhi),
			    R_REG(osh, &ai->errlogid),
			    R_REG(osh, &ai->errloguser),
			    R_REG(osh, &ai->errlogflags),
			    R_REG(osh, &ai->intstatus),
			    R_REG(osh, &ai->config),
			    R_REG(osh, &ai->itcr));
	}
}
#endif
1212
1213
/* Enable AXI backplane timeout detection on every cached APB bridge wrapper
 * so that a hung transaction raises an error log instead of wedging the bus.
 * Compiled out unless AXI_TIMEOUTS is defined.
 */
void
ai_enable_backplane_timeouts(si_t *sih)
{
#ifdef AXI_TIMEOUTS
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	int i;

	for (i = 0; i < sii->num_br; ++i) {
		/* br_wrapba[] was cached during ai_scan().
		 * NOTE(review): the stored backplane address is cast directly to a
		 * pointer — valid only where the backplane is directly addressable;
		 * confirm for the buses this build targets.
		 */
		ai = (aidmp_t *) sii->br_wrapba[i];
		W_REG(sii->osh, &ai->errlogctrl, (1 << AIELC_TO_ENAB_SHIFT) |
		      ((AXI_TO_VAL << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK));
	}
#endif /* AXI_TIMEOUTS */
}
1229
1230void
1231ai_clear_backplane_to(si_t *sih)
1232{
1233#ifdef AXI_TIMEOUTS
1234 si_info_t *sii = SI_INFO(sih);
1235 aidmp_t *ai;
1236 int i;
1237 uint32 errlogstatus;
1238
1239 for (i = 0; i < sii->num_br; ++i) {
1240 ai = (aidmp_t *) sii->br_wrapba[i];
1241 /* check for backplane timeout & clear backplane hang */
1242 errlogstatus = R_REG(sii->osh, &ai->errlogstatus);
1243
1244 if ((errlogstatus & AIELS_TIMEOUT_MASK) != 0) {
1245 /* set ErrDone to clear the condition */
1246 W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
1247
1248 /* SPINWAIT on errlogstatus timeout status bits */
1249 while (R_REG(sii->osh, &ai->errlogstatus) & AIELS_TIMEOUT_MASK)
1250 ;
1251
1252 /* only reset APB Bridge on timeout (not slave error, or dec error) */
1253 switch (errlogstatus & AIELS_TIMEOUT_MASK) {
1254 case 0x1:
1255 printf("AXI slave error");
1256 break;
1257 case 0x2:
1258 /* reset APB Bridge */
1259 OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
1260 /* sync write */
1261 (void)R_REG(sii->osh, &ai->resetctrl);
1262 /* clear Reset bit */
1263 AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
1264 /* sync write */
1265 (void)R_REG(sii->osh, &ai->resetctrl);
1266 printf("AXI timeout");
1267 break;
1268 case 0x3:
1269 printf("AXI decode error");
1270 break;
1271 default:
1272 ; /* should be impossible */
1273 }
1274 printf("; APB Bridge %d\n", i);
1275 printf("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
1276 R_REG(sii->osh, &ai->errlogaddrlo),
1277 R_REG(sii->osh, &ai->errlogaddrhi),
1278 R_REG(sii->osh, &ai->errlogid),
1279 R_REG(sii->osh, &ai->errlogflags));
1280 printf(", status 0x%08x\n", errlogstatus);
1281 }
1282 }
1283#endif /* AXI_TIMEOUTS */
1284}