/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17 #include <linux/types.h>
28 #include "siutils_priv.h"
30 /* local prototypes */
31 static uint
_sb_coreidx(si_info_t
*sii
, u32 sba
);
32 static uint
_sb_scan(si_info_t
*sii
, u32 sba
, void *regs
, uint bus
,
33 u32 sbba
, uint ncores
);
34 static u32
_sb_coresba(si_info_t
*sii
);
35 static void *_sb_setcoreidx(si_info_t
*sii
, uint coreidx
);
/* read-modify-write a backplane config register: clear 'mask', then or in 'val' */
#define SET_SBREG(sii, r, mask, val) \
	W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))

/* map a core's register base to its sbconfig area (last 256 bytes of the 4K window) */
#define REGS2SB(va)	(sbconfig_t *) ((s8 *)(va) + SBCONFIGOFF)

/* Sonics backplane revision codes, extracted from sbidlow */
#define SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

/* accessors for sbconfig registers */
#define R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
#define W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
#define AND_SBREG(sii, sbr, v) \
	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
#define OR_SBREG(sii, sbr, v) \
	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
52 static u32
sb_read_sbreg(si_info_t
*sii
, volatile u32
*sbr
)
54 return R_REG(sii
->osh
, sbr
);
57 static void sb_write_sbreg(si_info_t
*sii
, volatile u32
*sbr
, u32 v
)
59 W_REG(sii
->osh
, sbr
, v
);
62 uint
sb_coreid(si_t
*sih
)
68 sb
= REGS2SB(sii
->curmap
);
70 return (R_SBREG(sii
, &sb
->sbidhigh
) & SBIDH_CC_MASK
) >>
74 /* return core index of the core with address 'sba' */
75 static uint
_sb_coreidx(si_info_t
*sii
, u32 sba
)
79 for (i
= 0; i
< sii
->numcores
; i
++)
80 if (sba
== sii
->coresba
[i
])
85 /* return core address of the current core */
86 static u32
_sb_coresba(si_info_t
*sii
)
90 switch (BUSTYPE(sii
->pub
.bustype
)) {
93 sbaddr
= (u32
)(unsigned long)sii
->curmap
;
103 uint
sb_corerev(si_t
*sih
)
110 sb
= REGS2SB(sii
->curmap
);
111 sbidh
= R_SBREG(sii
, &sb
->sbidhigh
);
113 return SBCOREREV(sbidh
);
116 bool sb_iscoreup(si_t
*sih
)
122 sb
= REGS2SB(sii
->curmap
);
124 return (R_SBREG(sii
, &sb
->sbtmstatelow
) &
125 (SBTML_RESET
| SBTML_REJ_MASK
|
126 (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
))) ==
127 (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
);
131 * Switch to 'coreidx', issue a single arbitrary 32bit
132 * register mask&set operation,
133 * switch back to the original core, and return the new value.
135 * When using the silicon backplane, no fidleing with interrupts
136 * or core switches are needed.
138 * Also, when using pci/pcie, we can optimize away the core switching
140 * and (on newer pci cores) chipcommon registers.
142 uint
sb_corereg(si_t
*sih
, uint coreidx
, uint regoff
, uint mask
, uint val
)
153 ASSERT(GOODIDX(coreidx
));
154 ASSERT(regoff
< SI_CORE_SIZE
);
155 ASSERT((val
& ~mask
) == 0);
157 if (coreidx
>= SI_MAXCORES
)
161 INTR_OFF(sii
, intr_val
);
163 /* save current core index */
164 origidx
= si_coreidx(&sii
->pub
);
167 r
= (u32
*) ((unsigned char *) sb_setcoreidx(&sii
->pub
, coreidx
) +
174 if (regoff
>= SBCONFIGOFF
) {
175 w
= (R_SBREG(sii
, r
) & ~mask
) | val
;
178 w
= (R_REG(sii
->osh
, r
) & ~mask
) | val
;
179 W_REG(sii
->osh
, r
, w
);
184 if (regoff
>= SBCONFIGOFF
)
187 w
= R_REG(sii
->osh
, r
);
190 /* restore core index */
191 if (origidx
!= coreidx
)
192 sb_setcoreidx(&sii
->pub
, origidx
);
194 INTR_RESTORE(sii
, intr_val
);
200 /* Scan the enumeration space to find all cores starting from the given
201 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
202 * is the default core address at chip POR time and 'regs' is the virtual
203 * address that the default core is mapped at. 'ncores' is the number of
204 * cores expected on bus 'sbba'. It returns the total number of cores
205 * starting from bus 'sbba', inclusive.
207 #define SB_MAXBUSES 2
208 static uint
_sb_scan(si_info_t
*sii
, u32 sba
, void *regs
, uint bus
, u32 sbba
,
215 if (bus
>= SB_MAXBUSES
) {
216 SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to "
217 "scan\n", sbba
, bus
));
220 SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n",
223 /* Scan all cores on the bus starting from core 0.
224 * Core addresses must be contiguous on each bus.
226 for (i
= 0, next
= sii
->numcores
;
227 i
< numcores
&& next
< SB_BUS_MAXCORES
; i
++, next
++) {
228 sii
->coresba
[next
] = sbba
+ (i
* SI_CORE_SIZE
);
230 /* change core to 'next' and read its coreid */
231 sii
->curmap
= _sb_setcoreidx(sii
, next
);
234 sii
->coreid
[next
] = sb_coreid(&sii
->pub
);
236 /* core specific processing... */
237 /* chipc provides # cores */
238 if (sii
->coreid
[next
] == CC_CORE_ID
) {
239 chipcregs_t
*cc
= (chipcregs_t
*) sii
->curmap
;
240 u32 ccrev
= sb_corerev(&sii
->pub
);
242 /* determine numcores - this is the
243 total # cores in the chip */
244 if (((ccrev
== 4) || (ccrev
>= 6)))
246 (R_REG(sii
->osh
, &cc
->chipid
) & CID_CC_MASK
)
250 SI_ERROR(("sb_chip2numcores: unsupported chip "
251 "0x%x\n", CHIPID(sii
->pub
.chip
)));
256 SI_VMSG(("_sb_scan: %u cores in the chip %s\n",
257 numcores
, sii
->pub
.issim
? "QT" : ""));
259 /* scan bridged SB(s) and add results to the end of the list */
260 else if (sii
->coreid
[next
] == OCP_CORE_ID
) {
261 sbconfig_t
*sb
= REGS2SB(sii
->curmap
);
262 u32 nsbba
= R_SBREG(sii
, &sb
->sbadmatch1
);
265 sii
->numcores
= next
+ 1;
267 if ((nsbba
& 0xfff00000) != SI_ENUM_BASE
)
270 if (_sb_coreidx(sii
, nsbba
) != BADIDX
)
274 (R_SBREG(sii
, &sb
->sbtmstatehigh
) & 0x000f0000) >>
276 nsbcc
= _sb_scan(sii
, sba
, regs
, bus
+ 1, nsbba
, nsbcc
);
277 if (sbba
== SI_ENUM_BASE
)
283 SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i
, sbba
));
285 sii
->numcores
= i
+ ncc
;
286 return sii
->numcores
;
289 /* scan the sb enumerated space to identify all cores */
290 void sb_scan(si_t
*sih
, void *regs
, uint devid
)
297 sb
= REGS2SB(sii
->curmap
);
300 (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_RV_MASK
) >> SBIDL_RV_SHIFT
;
302 /* Save the current core info and validate it later till we know
303 * for sure what is good and what is bad.
305 origsba
= _sb_coresba(sii
);
307 /* scan all SB(s) starting from SI_ENUM_BASE */
308 sii
->numcores
= _sb_scan(sii
, origsba
, regs
, 0, SI_ENUM_BASE
, 1);
312 * This function changes logical "focus" to the indicated core;
313 * must be called with interrupts off.
314 * Moreover, callers should keep interrupts off during switching out of
315 * and back to d11 core
317 void *sb_setcoreidx(si_t
*sih
, uint coreidx
)
323 if (coreidx
>= sii
->numcores
)
327 * If the user has provided an interrupt mask enabled function,
328 * then assert interrupts are disabled before switching the core.
330 ASSERT((sii
->intrsenabled_fn
== NULL
)
331 || !(*(sii
)->intrsenabled_fn
) ((sii
)->intr_arg
));
333 sii
->curmap
= _sb_setcoreidx(sii
, coreidx
);
334 sii
->curidx
= coreidx
;
339 /* This function changes the logical "focus" to the indicated core.
340 * Return the current core's virtual address.
342 static void *_sb_setcoreidx(si_info_t
*sii
, uint coreidx
)
344 u32 sbaddr
= sii
->coresba
[coreidx
];
347 switch (BUSTYPE(sii
->pub
.bustype
)) {
352 if (!sii
->regs
[coreidx
]) {
353 sii
->regs
[coreidx
] = (void *)sbaddr
;
354 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
356 regs
= sii
->regs
[coreidx
];
368 /* traverse all cores to find and clear source of serror */
369 static void sb_serr_clear(si_info_t
*sii
)
373 uint i
, intr_val
= 0;
374 void *corereg
= NULL
;
376 INTR_OFF(sii
, intr_val
);
377 origidx
= si_coreidx(&sii
->pub
);
379 for (i
= 0; i
< sii
->numcores
; i
++) {
380 corereg
= sb_setcoreidx(&sii
->pub
, i
);
381 if (NULL
!= corereg
) {
382 sb
= REGS2SB(corereg
);
383 if ((R_SBREG(sii
, &sb
->sbtmstatehigh
)) & SBTMH_SERR
) {
384 AND_SBREG(sii
, &sb
->sbtmstatehigh
, ~SBTMH_SERR
);
385 SI_ERROR(("sb_serr_clear: SError core 0x%x\n",
386 sb_coreid(&sii
->pub
)));
391 sb_setcoreidx(&sii
->pub
, origidx
);
392 INTR_RESTORE(sii
, intr_val
);
396 * Check if any inband, outband or timeout errors has happened and clear them.
397 * Must be called with chip clk on !
399 bool sb_taclear(si_t
*sih
, bool details
)
406 u32 inband
= 0, serror
= 0, timeout
= 0;
407 void *corereg
= NULL
;
408 volatile u32 imstate
, tmstate
;
412 if ((BUSTYPE(sii
->pub
.bustype
) == SDIO_BUS
) ||
413 (BUSTYPE(sii
->pub
.bustype
) == SPI_BUS
)) {
415 INTR_OFF(sii
, intr_val
);
416 origidx
= si_coreidx(sih
);
418 corereg
= si_setcore(sih
, PCMCIA_CORE_ID
, 0);
420 corereg
= si_setcore(sih
, SDIOD_CORE_ID
, 0);
421 if (NULL
!= corereg
) {
422 sb
= REGS2SB(corereg
);
424 imstate
= R_SBREG(sii
, &sb
->sbimstate
);
425 if ((imstate
!= 0xffffffff)
426 && (imstate
& (SBIM_IBE
| SBIM_TO
))) {
427 AND_SBREG(sii
, &sb
->sbimstate
,
428 ~(SBIM_IBE
| SBIM_TO
));
429 /* inband = imstate & SBIM_IBE; cmd error */
430 timeout
= imstate
& SBIM_TO
;
432 tmstate
= R_SBREG(sii
, &sb
->sbtmstatehigh
);
433 if ((tmstate
!= 0xffffffff)
434 && (tmstate
& SBTMH_INT_STATUS
)) {
437 OR_SBREG(sii
, &sb
->sbtmstatelow
, SBTML_INT_ACK
);
438 AND_SBREG(sii
, &sb
->sbtmstatelow
,
443 sb_setcoreidx(sih
, origidx
);
444 INTR_RESTORE(sii
, intr_val
);
447 if (inband
| timeout
| serror
) {
449 SI_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout "
450 "0x%x!\n", inband
, serror
, timeout
));
456 void sb_core_disable(si_t
*sih
, u32 bits
)
464 ASSERT(GOODREGS(sii
->curmap
));
465 sb
= REGS2SB(sii
->curmap
);
467 /* if core is already in reset, just return */
468 if (R_SBREG(sii
, &sb
->sbtmstatelow
) & SBTML_RESET
)
471 /* if clocks are not enabled, put into reset and return */
472 if ((R_SBREG(sii
, &sb
->sbtmstatelow
) &
473 (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
)) == 0)
476 /* set target reject and spin until busy is clear
477 (preserve core-specific bits) */
478 OR_SBREG(sii
, &sb
->sbtmstatelow
, SBTML_REJ
);
479 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
481 SPINWAIT((R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_BUSY
), 100000);
482 if (R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_BUSY
)
483 SI_ERROR(("%s: target state still busy\n", __func__
));
485 if (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_INIT
) {
486 OR_SBREG(sii
, &sb
->sbimstate
, SBIM_RJ
);
487 dummy
= R_SBREG(sii
, &sb
->sbimstate
);
489 SPINWAIT((R_SBREG(sii
, &sb
->sbimstate
) & SBIM_BY
), 100000);
492 /* set reset and reject while enabling the clocks */
493 W_SBREG(sii
, &sb
->sbtmstatelow
,
494 (((bits
| SICF_FGC
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
) |
495 SBTML_REJ
| SBTML_RESET
));
496 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
499 /* don't forget to clear the initiator reject bit */
500 if (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_INIT
)
501 AND_SBREG(sii
, &sb
->sbimstate
, ~SBIM_RJ
);
504 /* leave reset and reject asserted */
505 W_SBREG(sii
, &sb
->sbtmstatelow
,
506 ((bits
<< SBTML_SICF_SHIFT
) | SBTML_REJ
| SBTML_RESET
));
510 /* reset and re-enable a core
512 * bits - core specific bits that are set during and after reset sequence
513 * resetbits - core specific bits that are set only during reset sequence
515 void sb_core_reset(si_t
*sih
, u32 bits
, u32 resetbits
)
522 ASSERT(GOODREGS(sii
->curmap
));
523 sb
= REGS2SB(sii
->curmap
);
526 * Must do the disable sequence first to work for
527 * arbitrary current core state.
529 sb_core_disable(sih
, (bits
| resetbits
));
532 * Now do the initialization sequence.
535 /* set reset while enabling the clock and
536 forcing them on throughout the core */
537 W_SBREG(sii
, &sb
->sbtmstatelow
,
538 (((bits
| resetbits
| SICF_FGC
| SICF_CLOCK_EN
) <<
539 SBTML_SICF_SHIFT
) | SBTML_RESET
));
540 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
543 if (R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_SERR
)
544 W_SBREG(sii
, &sb
->sbtmstatehigh
, 0);
546 dummy
= R_SBREG(sii
, &sb
->sbimstate
);
547 if (dummy
& (SBIM_IBE
| SBIM_TO
))
548 AND_SBREG(sii
, &sb
->sbimstate
, ~(SBIM_IBE
| SBIM_TO
));
550 /* clear reset and allow it to propagate throughout the core */
551 W_SBREG(sii
, &sb
->sbtmstatelow
,
552 ((bits
| resetbits
| SICF_FGC
| SICF_CLOCK_EN
) <<
554 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
557 /* leave clock enabled */
558 W_SBREG(sii
, &sb
->sbtmstatelow
,
559 ((bits
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
));
560 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
564 u32
sb_base(u32 admatch
)
569 type
= admatch
& SBAM_TYPE_MASK
;
575 base
= admatch
& SBAM_BASE0_MASK
;
576 } else if (type
== 1) {
577 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
578 base
= admatch
& SBAM_BASE1_MASK
;
579 } else if (type
== 2) {
580 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
581 base
= admatch
& SBAM_BASE2_MASK
;