PD#91902: update ap6xxx module wifi driver to 1.88.45.4
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / broadcm_40181 / bcmsdh_sdmmc.c
1 /*
2 * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
3 *
4 * Copyright (C) 1999-2013, Broadcom Corporation
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
23 *
24 * $Id: bcmsdh_sdmmc.c 418714 2013-08-16 13:21:09Z $
25 */
26 #include <typedefs.h>
27
28 #include <bcmdevs.h>
29 #include <bcmendian.h>
30 #include <bcmutils.h>
31 #include <osl.h>
32 #include <sdio.h> /* SDIO Device and Protocol Specs */
33 #include <sdioh.h> /* Standard SDIO Host Controller Specification */
34 #include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
35 #include <sdiovar.h> /* ioctl/iovars */
36
37 #include <linux/mmc/core.h>
38 #include <linux/mmc/card.h>
39 #include <linux/mmc/sdio_func.h>
40 #include <linux/mmc/sdio_ids.h>
41
42 #include <dngl_stats.h>
43 #include <dhd.h>
44
45 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
46 #include <linux/suspend.h>
47 extern volatile bool dhd_mmc_suspend;
48 #endif
49 #include "bcmsdh_sdmmc.h"
50
51 #ifndef BCMSDH_MODULE
52 extern int sdio_function_init(void);
53 extern void sdio_function_cleanup(void);
54 #endif /* BCMSDH_MODULE */
55
56 #if !defined(OOB_INTR_ONLY)
57 static void IRQHandler(struct sdio_func *func);
58 static void IRQHandlerF2(struct sdio_func *func);
59 #endif /* !defined(OOB_INTR_ONLY) */
60 static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
61 extern int sdio_reset_comm(struct mmc_card *card);
62
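/*
 * gInstance is allocated and populated by the Linux SDIO probe glue (outside
 * this file); func[0]..func[2] hold the sdio_func pointers for F0/F1/F2 and
 * every card access below goes through them.
 */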
63 extern PBCMSDH_SDMMC_INSTANCE gInstance;
64
65 #define DEFAULT_SDIO_F2_BLKSIZE 512
66 #ifndef CUSTOM_SDIO_F2_BLKSIZE
67 #define CUSTOM_SDIO_F2_BLKSIZE DEFAULT_SDIO_F2_BLKSIZE
68 #endif
69
70 uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
71 uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
72 uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
73
74 uint sd_power = 1; /* Default to SD Slot powered ON */
75 uint sd_clock = 1; /* Default to SD Clock turned ON */
76 uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */
77 uint sd_msglevel = 0x01;
78 uint sd_use_dma = TRUE;
79
80 #ifdef BCMSDIOH_TXGLOM
81 #ifndef CUSTOM_TXGLOM
82 #define CUSTOM_TXGLOM 0
83 #endif
84 uint sd_txglom = CUSTOM_TXGLOM;
85 #endif /* BCMSDIOH_TXGLOM */
86
87 #ifndef CUSTOM_RXCHAIN
88 #define CUSTOM_RXCHAIN 0
89 #endif
90
91 DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
92 DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
93 DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
94 DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
95
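/*
 * DMA_ALIGN_MASK: packet data must be 4-byte aligned before it is handed to
 * the CMD53 path; sdioh_request_buffer() copies unaligned buffers into a
 * fresh packet first.  MMC_SDIO_ABORT_RETRY_LIMIT bounds the CCCR I/O-abort
 * retries issued after a failed transfer.
 */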
96 #define DMA_ALIGN_MASK 0x03
97 #define MMC_SDIO_ABORT_RETRY_LIMIT 5
98
99 int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
100
101 static int
102 sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
103 {
104 int err_ret;
105 uint32 fbraddr;
106 uint8 func;
107
108 sd_trace(("%s\n", __FUNCTION__));
109
110 /* Get the Card's common CIS address */
111 sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
112 sd->func_cis_ptr[0] = sd->com_cis_ptr;
113 sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
114
115 /* Get the Card's function CIS (for each function) */
116 for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
117 func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
118 sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
119 sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
120 __FUNCTION__, func, sd->func_cis_ptr[func]));
121 }
122
123 sd->func_cis_ptr[0] = sd->com_cis_ptr;
124 sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
125
126 /* Enable Function 1 */
127 sdio_claim_host(gInstance->func[1]);
128 err_ret = sdio_enable_func(gInstance->func[1]);
129 sdio_release_host(gInstance->func[1]);
130 if (err_ret) {
131 sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
132 }
133
134 return FALSE;
135 }
136
137 /*
138 * Public entry points & externs
139 */
140 extern sdioh_info_t *
141 sdioh_attach(osl_t *osh, void *bar0, uint irq)
142 {
143 sdioh_info_t *sd;
144 int err_ret;
145
146 sd_trace(("%s\n", __FUNCTION__));
147
148 if (gInstance == NULL) {
149 sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
150 return NULL;
151 }
152
153 if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
154 sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
155 return NULL;
156 }
157 bzero((char *)sd, sizeof(sdioh_info_t));
158 sd->osh = osh;
159 if (sdioh_sdmmc_osinit(sd) != 0) {
160 sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __FUNCTION__));
161 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
162 return NULL;
163 }
164
165 sd->num_funcs = 2;
166 sd->sd_blockmode = TRUE;
167 sd->use_client_ints = TRUE;
168 sd->client_block_size[0] = 64;
169 sd->use_rxchain = CUSTOM_RXCHAIN;
170
171 gInstance->sd = sd;
172
173 /* Claim host controller */
174 if (gInstance->func[1]) {
175 sdio_claim_host(gInstance->func[1]);
176
177 sd->client_block_size[1] = 64;
178 err_ret = sdio_set_block_size(gInstance->func[1], 64);
179 /* Release host controller F1 */
180 sdio_release_host(gInstance->func[1]);
181 if (err_ret) {
182 sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
183 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
184 return NULL;
185 }
186
187 } else {
188 sd_err(("%s:gInstance->func[1] is null\n", __FUNCTION__));
189 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
190 return NULL;
191 }
192
193 if (gInstance->func[2]) {
194 /* Claim host controller F2 */
195 sdio_claim_host(gInstance->func[2]);
196
197 sd->client_block_size[2] = sd_f2_blocksize;
198 err_ret = sdio_set_block_size(gInstance->func[2], sd_f2_blocksize);
199 /* Release host controller F2 */
200 sdio_release_host(gInstance->func[2]);
201 if (err_ret) {
202 sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d\n",
203 sd_f2_blocksize));
204 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
205 return NULL;
206 }
207
208 } else {
209 sd_err(("%s:gInstance->func[2] is null\n", __FUNCTION__));
210 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
211 return NULL;
212 }
213
214 sdioh_sdmmc_card_enablefuncs(sd);
215
216 sd_trace(("%s: Done\n", __FUNCTION__));
217 return sd;
218 }
219
220
221 extern SDIOH_API_RC
222 sdioh_detach(osl_t *osh, sdioh_info_t *sd)
223 {
224 sd_trace(("%s\n", __FUNCTION__));
225
226 if (sd) {
227
228 /* Disable Function 2 */
229 sdio_claim_host(gInstance->func[2]);
230 sdio_disable_func(gInstance->func[2]);
231 sdio_release_host(gInstance->func[2]);
232
233 /* Disable Function 1 */
234 if (gInstance->func[1]) {
235 sdio_claim_host(gInstance->func[1]);
236 sdio_disable_func(gInstance->func[1]);
237 sdio_release_host(gInstance->func[1]);
238 }
239
240 gInstance->func[1] = NULL;
241 gInstance->func[2] = NULL;
242
243 /* deregister irq */
244 sdioh_sdmmc_osfree(sd);
245
246 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
247 }
248 return SDIOH_API_RC_SUCCESS;
249 }
250
251 #if defined(OOB_INTR_ONLY) && defined(HW_OOB)
252
253 extern SDIOH_API_RC
254 sdioh_enable_func_intr(void)
255 {
256 uint8 reg;
257 int err;
258
259 if (gInstance->func[0]) {
260 sdio_claim_host(gInstance->func[0]);
261
262 reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
263 if (err) {
264 sd_err(("%s: error reading SDIO_CCCR_IENx: 0x%x\n", __FUNCTION__, err));
265 sdio_release_host(gInstance->func[0]);
266 return SDIOH_API_RC_FAIL;
267 }
268
269 /* Enable F1 and F2 interrupts, clear master enable */
270 reg &= ~INTR_CTL_MASTER_EN;
271 reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
272 sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
273 sdio_release_host(gInstance->func[0]);
274
275 if (err) {
276 sd_err(("%s: error writing SDIO_CCCR_IENx: 0x%x\n", __FUNCTION__, err));
277 return SDIOH_API_RC_FAIL;
278 }
279 }
280
281 return SDIOH_API_RC_SUCCESS;
282 }
283
284 extern SDIOH_API_RC
285 sdioh_disable_func_intr(void)
286 {
287 uint8 reg;
288 int err;
289
290 if (gInstance->func[0]) {
291 sdio_claim_host(gInstance->func[0]);
292 reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
293 if (err) {
294 sd_err(("%s: error reading SDIO_CCCR_IENx: 0x%x\n", __FUNCTION__, err));
295 sdio_release_host(gInstance->func[0]);
296 return SDIOH_API_RC_FAIL;
297 }
298
299 reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
300 /* Disable master interrupt with the last function interrupt */
301 if (!(reg & 0xFE))
302 reg = 0;
303 sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
304
305 sdio_release_host(gInstance->func[0]);
306 if (err) {
307 sd_err(("%s: error writing SDIO_CCCR_IENx: 0x%x\n", __FUNCTION__, err));
308 return SDIOH_API_RC_FAIL;
309 }
310 }
311 return SDIOH_API_RC_SUCCESS;
312 }
313 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
314
315 /* Configure callback to client when we receive a client interrupt */
316 extern SDIOH_API_RC
317 sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
318 {
319 sd_trace(("%s: Entering\n", __FUNCTION__));
320 if (fn == NULL) {
321 sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
322 return SDIOH_API_RC_FAIL;
323 }
324 #if !defined(OOB_INTR_ONLY)
325 sd->intr_handler = fn;
326 sd->intr_handler_arg = argh;
327 sd->intr_handler_valid = TRUE;
328
329 /* register and unmask irq */
330 if (gInstance->func[2]) {
331 sdio_claim_host(gInstance->func[2]);
332 sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
333 sdio_release_host(gInstance->func[2]);
334 }
335
336 if (gInstance->func[1]) {
337 sdio_claim_host(gInstance->func[1]);
338 sdio_claim_irq(gInstance->func[1], IRQHandler);
339 sdio_release_host(gInstance->func[1]);
340 }
341 #elif defined(HW_OOB)
342 sdioh_enable_func_intr();
343 #endif /* !defined(OOB_INTR_ONLY) */
344
345 return SDIOH_API_RC_SUCCESS;
346 }
347
348 extern SDIOH_API_RC
349 sdioh_interrupt_deregister(sdioh_info_t *sd)
350 {
351 sd_trace(("%s: Entering\n", __FUNCTION__));
352
353 #if !defined(OOB_INTR_ONLY)
354 if (gInstance->func[1]) {
355 /* release irq */
356 sdio_claim_host(gInstance->func[1]);
357 sdio_release_irq(gInstance->func[1]);
358 sdio_release_host(gInstance->func[1]);
359 }
360
361 if (gInstance->func[2]) {
362 /* Claim host controller F2 */
363 sdio_claim_host(gInstance->func[2]);
364 sdio_release_irq(gInstance->func[2]);
365 /* Release host controller F2 */
366 sdio_release_host(gInstance->func[2]);
367 }
368
369 sd->intr_handler_valid = FALSE;
370 sd->intr_handler = NULL;
371 sd->intr_handler_arg = NULL;
372 #elif defined(HW_OOB)
373 if (dhd_download_fw_on_driverload)
374 sdioh_disable_func_intr();
375 #endif /* !defined(OOB_INTR_ONLY) */
376 return SDIOH_API_RC_SUCCESS;
377 }
378
379 extern SDIOH_API_RC
380 sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
381 {
382 sd_trace(("%s: Entering\n", __FUNCTION__));
383 *onoff = sd->client_intr_enabled;
384 return SDIOH_API_RC_SUCCESS;
385 }
386
387 #if defined(DHD_DEBUG)
388 extern bool
389 sdioh_interrupt_pending(sdioh_info_t *sd)
390 {
391 return (0);
392 }
393 #endif
394
395 uint
396 sdioh_query_iofnum(sdioh_info_t *sd)
397 {
398 return sd->num_funcs;
399 }
400
401 /* IOVar table */
402 enum {
403 IOV_MSGLEVEL = 1,
404 IOV_BLOCKMODE,
405 IOV_BLOCKSIZE,
406 IOV_DMA,
407 IOV_USEINTS,
408 IOV_NUMINTS,
409 IOV_NUMLOCALINTS,
410 IOV_HOSTREG,
411 IOV_DEVREG,
412 IOV_DIVISOR,
413 IOV_SDMODE,
414 IOV_HISPEED,
415 IOV_HCIREGS,
416 IOV_POWER,
417 IOV_CLOCK,
418 IOV_RXCHAIN
419 };
420
421 const bcm_iovar_t sdioh_iovars[] = {
422 {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
423 {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 },
424 {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
425 {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 },
426 {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 },
427 {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 },
428 {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 },
429 {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
430 {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
431 {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 },
432 {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 },
433 {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 },
434 {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100},
435 {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0 },
436 {"sd_rxchain", IOV_RXCHAIN, 0, IOVT_BOOL, 0 },
437 {NULL, 0, 0, 0, 0 }
438 };
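/*
 * Example (illustrative only): sd_blocksize packs the function number and
 * block size into one value, ((fn << 16) | size), so setting function 2 to a
 * 256-byte block size would pass int_val = (2 << 16) | 256 through the
 * IOV_SVAL(IOV_BLOCKSIZE) case below.
 */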
439
440 int
441 sdioh_iovar_op(sdioh_info_t *si, const char *name,
442 void *params, int plen, void *arg, int len, bool set)
443 {
444 const bcm_iovar_t *vi = NULL;
445 int bcmerror = 0;
446 int val_size;
447 int32 int_val = 0;
448 bool bool_val;
449 uint32 actionid;
450
451 ASSERT(name);
452 ASSERT(len >= 0);
453
454 /* Get must have return space; Set does not take qualifiers */
455 ASSERT(set || (arg && len));
456 ASSERT(!set || (!params && !plen));
457
458 sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
459
460 if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
461 bcmerror = BCME_UNSUPPORTED;
462 goto exit;
463 }
464
465 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
466 goto exit;
467
468 /* Set up params so get and set can share the convenience variables */
469 if (params == NULL) {
470 params = arg;
471 plen = len;
472 }
473
474 if (vi->type == IOVT_VOID)
475 val_size = 0;
476 else if (vi->type == IOVT_BUFFER)
477 val_size = len;
478 else
479 val_size = sizeof(int);
480
481 if (plen >= (int)sizeof(int_val))
482 bcopy(params, &int_val, sizeof(int_val));
483
484 bool_val = (int_val != 0) ? TRUE : FALSE;
485 BCM_REFERENCE(bool_val);
486
487 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
488 switch (actionid) {
489 case IOV_GVAL(IOV_MSGLEVEL):
490 int_val = (int32)sd_msglevel;
491 bcopy(&int_val, arg, val_size);
492 break;
493
494 case IOV_SVAL(IOV_MSGLEVEL):
495 sd_msglevel = int_val;
496 break;
497
498 case IOV_GVAL(IOV_BLOCKMODE):
499 int_val = (int32)si->sd_blockmode;
500 bcopy(&int_val, arg, val_size);
501 break;
502
503 case IOV_SVAL(IOV_BLOCKMODE):
504 si->sd_blockmode = (bool)int_val;
505 /* Haven't figured out how to make non-block mode with DMA */
506 break;
507
508 case IOV_GVAL(IOV_BLOCKSIZE):
509 if ((uint32)int_val > si->num_funcs) {
510 bcmerror = BCME_BADARG;
511 break;
512 }
513 int_val = (int32)si->client_block_size[int_val];
514 bcopy(&int_val, arg, val_size);
515 break;
516
517 case IOV_SVAL(IOV_BLOCKSIZE):
518 {
519 uint func = ((uint32)int_val >> 16);
520 uint blksize = (uint16)int_val;
521 uint maxsize;
522
523 if (func > si->num_funcs) {
524 bcmerror = BCME_BADARG;
525 break;
526 }
527
528 switch (func) {
529 case 0: maxsize = 32; break;
530 case 1: maxsize = BLOCK_SIZE_4318; break;
531 case 2: maxsize = BLOCK_SIZE_4328; break;
532 default: maxsize = 0;
533 }
534 if (blksize > maxsize) {
535 bcmerror = BCME_BADARG;
536 break;
537 }
538 if (!blksize) {
539 blksize = maxsize;
540 }
541
542 /* Now set it */
543 si->client_block_size[func] = blksize;
544
545 break;
546 }
547
548 case IOV_GVAL(IOV_RXCHAIN):
549 int_val = (int32)si->use_rxchain;
550 bcopy(&int_val, arg, val_size);
551 break;
552
553 case IOV_GVAL(IOV_DMA):
554 int_val = (int32)si->sd_use_dma;
555 bcopy(&int_val, arg, val_size);
556 break;
557
558 case IOV_SVAL(IOV_DMA):
559 si->sd_use_dma = (bool)int_val;
560 break;
561
562 case IOV_GVAL(IOV_USEINTS):
563 int_val = (int32)si->use_client_ints;
564 bcopy(&int_val, arg, val_size);
565 break;
566
567 case IOV_SVAL(IOV_USEINTS):
568 si->use_client_ints = (bool)int_val;
569 if (si->use_client_ints)
570 si->intmask |= CLIENT_INTR;
571 else
572 si->intmask &= ~CLIENT_INTR;
573
574 break;
575
576 case IOV_GVAL(IOV_DIVISOR):
577 int_val = (uint32)sd_divisor;
578 bcopy(&int_val, arg, val_size);
579 break;
580
581 case IOV_SVAL(IOV_DIVISOR):
582 sd_divisor = int_val;
583 break;
584
585 case IOV_GVAL(IOV_POWER):
586 int_val = (uint32)sd_power;
587 bcopy(&int_val, arg, val_size);
588 break;
589
590 case IOV_SVAL(IOV_POWER):
591 sd_power = int_val;
592 break;
593
594 case IOV_GVAL(IOV_CLOCK):
595 int_val = (uint32)sd_clock;
596 bcopy(&int_val, arg, val_size);
597 break;
598
599 case IOV_SVAL(IOV_CLOCK):
600 sd_clock = int_val;
601 break;
602
603 case IOV_GVAL(IOV_SDMODE):
604 int_val = (uint32)sd_sdmode;
605 bcopy(&int_val, arg, val_size);
606 break;
607
608 case IOV_SVAL(IOV_SDMODE):
609 sd_sdmode = int_val;
610 break;
611
612 case IOV_GVAL(IOV_HISPEED):
613 int_val = (uint32)sd_hiok;
614 bcopy(&int_val, arg, val_size);
615 break;
616
617 case IOV_SVAL(IOV_HISPEED):
618 sd_hiok = int_val;
619 break;
620
621 case IOV_GVAL(IOV_NUMINTS):
622 int_val = (int32)si->intrcount;
623 bcopy(&int_val, arg, val_size);
624 break;
625
626 case IOV_GVAL(IOV_NUMLOCALINTS):
627 int_val = (int32)0;
628 bcopy(&int_val, arg, val_size);
629 break;
630
631 case IOV_GVAL(IOV_HOSTREG):
632 {
633 sdreg_t *sd_ptr = (sdreg_t *)params;
634
635 if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
636 sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
637 bcmerror = BCME_BADARG;
638 break;
639 }
640
641 sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
642 (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
643 sd_ptr->offset));
644 if (sd_ptr->offset & 1)
645 int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
646 else if (sd_ptr->offset & 2)
647 int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
648 else
649 int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
650
651 bcopy(&int_val, arg, sizeof(int_val));
652 break;
653 }
654
655 case IOV_SVAL(IOV_HOSTREG):
656 {
657 sdreg_t *sd_ptr = (sdreg_t *)params;
658
659 if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
660 sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
661 bcmerror = BCME_BADARG;
662 break;
663 }
664
665 sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
666 (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
667 sd_ptr->offset));
668 break;
669 }
670
671 case IOV_GVAL(IOV_DEVREG):
672 {
673 sdreg_t *sd_ptr = (sdreg_t *)params;
674 uint8 data = 0;
675
676 if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
677 bcmerror = BCME_SDIO_ERROR;
678 break;
679 }
680
681 int_val = (int)data;
682 bcopy(&int_val, arg, sizeof(int_val));
683 break;
684 }
685
686 case IOV_SVAL(IOV_DEVREG):
687 {
688 sdreg_t *sd_ptr = (sdreg_t *)params;
689 uint8 data = (uint8)sd_ptr->value;
690
691 if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
692 bcmerror = BCME_SDIO_ERROR;
693 break;
694 }
695 break;
696 }
697
698 default:
699 bcmerror = BCME_UNSUPPORTED;
700 break;
701 }
702 exit:
703
704 return bcmerror;
705 }
706
707 #if defined(OOB_INTR_ONLY) && defined(HW_OOB)
708
709 SDIOH_API_RC
710 sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
711 {
712 SDIOH_API_RC status;
713 uint8 data;
714
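	/*
	 * SDIOD_CCCR_BRCM_SEPINT is the Broadcom vendor register controlling the
	 * dedicated out-of-band interrupt pin: SDIO_SEPINT_MASK gates the OOB
	 * interrupt, SDIO_SEPINT_OE enables the pad output, and
	 * SDIO_SEPINT_ACT_HI selects active-high polarity.  Writing only
	 * SDIO_SEPINT_ACT_HI therefore leaves the OOB interrupt disabled.
	 */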
715 if (enable)
716 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
717 else
718 data = SDIO_SEPINT_ACT_HI; /* disable hw oob interrupt */
719
720 status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
721 return status;
722 }
723 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
724
725 extern SDIOH_API_RC
726 sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
727 {
728 SDIOH_API_RC status;
729 /* No lock needed since sdioh_request_byte does locking */
730 status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
731 return status;
732 }
733
734 extern SDIOH_API_RC
735 sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
736 {
737 /* No lock needed since sdioh_request_byte does locking */
738 SDIOH_API_RC status;
739 status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
740 return status;
741 }
742
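/*
 * Minimal usage sketch for the two helpers above (hypothetical caller, not
 * code from this driver): read-modify-write a single byte-wide register.
 *
 *	uint8 val;
 *	if (sdioh_cfg_read(sd, 1, regaddr, &val) == SDIOH_API_RC_SUCCESS) {
 *		val |= 0x01;
 *		sdioh_cfg_write(sd, 1, regaddr, &val);
 *	}
 */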
743 static int
744 sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
745 {
746 /* read 24 bits and return valid 17 bit addr */
747 int i;
748 uint32 scratch, regdata;
749 uint8 *ptr = (uint8 *)&scratch;
750 for (i = 0; i < 3; i++) {
751 if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
752 sd_err(("%s: Can't read!\n", __FUNCTION__));
753
754 *ptr++ = (uint8) regdata;
755 regaddr++;
756 }
757
758 /* Only the lower 17-bits are valid */
759 scratch = ltoh32(scratch);
760 scratch &= 0x0001FFFF;
761 return (scratch);
762 }
763
764 extern SDIOH_API_RC
765 sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
766 {
767 uint32 count;
768 int offset;
769 uint32 foo;
770 uint8 *cis = cisd;
771
772 sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
773
774 if (!sd->func_cis_ptr[func]) {
775 bzero(cis, length);
776 sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
777 return SDIOH_API_RC_FAIL;
778 }
779
780 sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
781
782 for (count = 0; count < length; count++) {
783 offset = sd->func_cis_ptr[func] + count;
784 if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
785 sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
786 return SDIOH_API_RC_FAIL;
787 }
788
789 *cis = (uint8)(foo & 0xff);
790 cis++;
791 }
792
793 return SDIOH_API_RC_SUCCESS;
794 }
795
796 extern SDIOH_API_RC
797 sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
798 {
799 int err_ret = 0;
800 #if defined(MMC_SDIO_ABORT)
801 int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
802 #endif
803
804 sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
805
806 DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
807 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
808 if(rw) { /* CMD52 Write */
809 if (func == 0) {
810 /* Can only directly write to some F0 registers. Handle F2 enable
811 * as a special case.
812 */
813 if (regaddr == SDIOD_CCCR_IOEN) {
814 if (gInstance->func[2]) {
815 sdio_claim_host(gInstance->func[2]);
816 if (*byte & SDIO_FUNC_ENABLE_2) {
817 /* Enable Function 2 */
818 err_ret = sdio_enable_func(gInstance->func[2]);
819 if (err_ret) {
820 sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
821 err_ret));
822 }
823 } else {
824 /* Disable Function 2 */
825 err_ret = sdio_disable_func(gInstance->func[2]);
826 if (err_ret) {
827 sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
828 err_ret));
829 }
830 }
831 sdio_release_host(gInstance->func[2]);
832 }
833 }
834 #if defined(MMC_SDIO_ABORT)
835 /* to allow abort command through F1 */
836 else if (regaddr == SDIOD_CCCR_IOABORT) {
837 while (sdio_abort_retry--) {
838 if (gInstance->func[func]) {
839 sdio_claim_host(gInstance->func[func]);
840 /*
841 * This sdio_writeb() call may be replaced with
842 * another API depending on MMC driver changes.
843 * For now it is a temporary workaround.
844 */
845 sdio_writeb(gInstance->func[func],
846 *byte, regaddr, &err_ret);
847 sdio_release_host(gInstance->func[func]);
848 }
849 if (!err_ret)
850 break;
851 }
852 }
853 #endif /* MMC_SDIO_ABORT */
854 else if (regaddr < 0xF0) {
855 sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
856 } else {
857 /* Claim host controller, perform F0 write, and release */
858 if (gInstance->func[func]) {
859 sdio_claim_host(gInstance->func[func]);
860 sdio_f0_writeb(gInstance->func[func],
861 *byte, regaddr, &err_ret);
862 sdio_release_host(gInstance->func[func]);
863 }
864 }
865 } else {
866 /* Claim host controller, perform Fn write, and release */
867 if (gInstance->func[func]) {
868 sdio_claim_host(gInstance->func[func]);
869 sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
870 sdio_release_host(gInstance->func[func]);
871 }
872 }
873 } else { /* CMD52 Read */
874 /* Claim host controller, perform Fn read, and release */
875 if (gInstance->func[func]) {
876 sdio_claim_host(gInstance->func[func]);
877 if (func == 0) {
878 *byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret);
879 } else {
880 *byte = sdio_readb(gInstance->func[func], regaddr, &err_ret);
881 }
882 sdio_release_host(gInstance->func[func]);
883 }
884 }
885
886 if (err_ret) {
887 if ((regaddr == 0x1001F) && (err_ret == -110)) {
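				/*
				 * Register 0x1001F (the F1 sleep-control CSR in Broadcom's
				 * register map) is polled while the chip may be asleep, so a
				 * -110 (-ETIMEDOUT) result here is expected and intentionally
				 * not logged.
				 */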
888 } else {
889 sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
890 rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
891 }
892 }
893
894 return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
895 }
896
897 extern SDIOH_API_RC
898 sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
899 uint32 *word, uint nbytes)
900 {
901 int err_ret = SDIOH_API_RC_FAIL;
902 int err_ret2 = SDIOH_API_RC_SUCCESS; /* terence 20130621: prevent dhd_dpc from deadlocking */
903 #if defined(MMC_SDIO_ABORT)
904 int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
905 #endif
906
907 if (func == 0) {
908 sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
909 return SDIOH_API_RC_FAIL;
910 }
911
912 sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
913 __FUNCTION__, cmd_type, rw, func, addr, nbytes));
914
915 DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
916 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
917 /* Claim host controller */
918 sdio_claim_host(gInstance->func[func]);
919
920 if(rw) { /* CMD52 Write */
921 if (nbytes == 4) {
922 sdio_writel(gInstance->func[func], *word, addr, &err_ret);
923 } else if (nbytes == 2) {
924 sdio_writew(gInstance->func[func], (*word & 0xFFFF), addr, &err_ret);
925 } else {
926 sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
927 }
928 } else { /* CMD52 Read */
929 if (nbytes == 4) {
930 *word = sdio_readl(gInstance->func[func], addr, &err_ret);
931 } else if (nbytes == 2) {
932 *word = sdio_readw(gInstance->func[func], addr, &err_ret) & 0xFFFF;
933 } else {
934 sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
935 }
936 }
937
938 /* Release host controller */
939 sdio_release_host(gInstance->func[func]);
940
941 if (err_ret) {
942 #if defined(MMC_SDIO_ABORT)
943 /* Any error on CMD53 transaction should abort that function using function 0. */
944 while (sdio_abort_retry--) {
945 if (gInstance->func[0]) {
946 sdio_claim_host(gInstance->func[0]);
947 /*
948 * This sdio_writeb() call may be replaced with another API
949 * depending on MMC driver changes.
950 * For now it is a temporary workaround.
951 */
952 sdio_writeb(gInstance->func[0],
953 func, SDIOD_CCCR_IOABORT, &err_ret2);
954 sdio_release_host(gInstance->func[0]);
955 }
956 if (!err_ret2)
957 break;
958 }
959 if (err_ret)
960 #endif /* MMC_SDIO_ABORT */
961 {
962 sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x\n",
963 rw ? "Write" : "Read", err_ret));
964 }
965 }
966
967 return (((err_ret == 0)&&(err_ret2 == 0)) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
968 }
969
970 #ifdef BCMSDIOH_TXGLOM
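/*
 * TX glomming overview: the layer above queues packets with sdioh_glom_post()
 * (a singly linked list tracked by glom_pkt_head/glom_pkt_tail), then submits
 * the tail packet through sdioh_request_buffer(); sdioh_request_packet()
 * detects this and transfers the whole chain in one operation, either by
 * building a scatter-gather CMD53 (SDPCM_TXGLOM_MDESC) or by copying into a
 * bounce buffer (SDPCM_TXGLOM_CPY).  sdioh_glom_clear() unlinks the chain.
 */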
971 void
972 sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len)
973 {
974 void *phead = sd->glom_info.glom_pkt_head;
975 void *ptail = sd->glom_info.glom_pkt_tail;
976
977 BCM_REFERENCE(frame);
978
979 ASSERT(!PKTLINK(pkt));
980 if (!phead) {
981 ASSERT(!ptail);
982 sd->glom_info.glom_pkt_head = sd->glom_info.glom_pkt_tail = pkt;
983 }
984 else {
985 ASSERT(ptail);
986 PKTSETNEXT(sd->osh, ptail, pkt);
987 sd->glom_info.glom_pkt_tail = pkt;
988 }
989 sd->glom_info.count++;
990 }
991
992 void
993 sdioh_glom_clear(sdioh_info_t *sd)
994 {
995 void *pnow, *pnext;
996
997 pnext = sd->glom_info.glom_pkt_head;
998
999 if (!pnext) {
1000 sd_err(("sdioh_glom_clear: no first packet to clear!\n"));
1001 return;
1002 }
1003
1004 while (pnext) {
1005 pnow = pnext;
1006 pnext = PKTNEXT(sd->osh, pnow);
1007 PKTSETNEXT(sd->osh, pnow, NULL);
1008 sd->glom_info.count--;
1009 }
1010
1011 sd->glom_info.glom_pkt_head = NULL;
1012 sd->glom_info.glom_pkt_tail = NULL;
1013 if (sd->glom_info.count != 0) {
1014 sd_err(("sdioh_glom_clear: glom count mismatch!\n"));
1015 sd->glom_info.count = 0;
1016 }
1017 }
1018
1019 uint
1020 sdioh_set_mode(sdioh_info_t *sd, uint mode)
1021 {
1022 if (mode == SDPCM_TXGLOM_CPY)
1023 sd->txglom_mode = mode;
1024 else if (mode == SDPCM_TXGLOM_MDESC)
1025 sd->txglom_mode = mode;
1026
1027 return (sd->txglom_mode);
1028 }
1029
1030 bool
1031 sdioh_glom_enabled(void)
1032 {
1033 return sd_txglom;
1034 }
1035 #endif /* BCMSDIOH_TXGLOM */
1036
1037 static INLINE int sdioh_request_packet_align(uint pkt_len, uint write, uint func, int blk_size)
1038 {
1039 /* Align Patch */
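	/*
	 * All reads, and writes shorter than 32 bytes, are rounded up to a
	 * 4-byte boundary; writes larger than the block size that are not
	 * already a multiple of it are padded up to a whole number of blocks.
	 */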
1040 if (!write || pkt_len < 32)
1041 pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
1042 else if ((pkt_len > blk_size) && (pkt_len % blk_size)) {
1043 if (func == SDIO_FUNC_2) {
1044 sd_err(("%s: [%s] %d-byte packet exceeds the %d-byte block size"
1045 " and must be padded up to a whole number of blocks\n",
1046 __FUNCTION__, write ? "W" : "R", pkt_len, blk_size));
1047 }
1048 pkt_len += blk_size - (pkt_len % blk_size);
1049 }
1050 #ifdef CONFIG_MMC_MSM7X00A
1051 if ((pkt_len % 64) == 32) {
1052 sd_err(("%s: Rounding up TX packet +=32\n", __FUNCTION__));
1053 pkt_len += 32;
1054 }
1055 #endif /* CONFIG_MMC_MSM7X00A */
1056 return pkt_len;
1057 }
1058
1059 static SDIOH_API_RC
1060 sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
1061 uint addr, void *pkt)
1062 {
1063 bool fifo = (fix_inc == SDIOH_DATA_FIX);
1064 uint32 SGCount = 0;
1065 int err_ret = 0;
1066 void *pnext;
1067 uint ttl_len, dma_len, lft_len, xfred_len, pkt_len;
1068 uint blk_num;
1069 int blk_size;
1070 struct mmc_request mmc_req;
1071 struct mmc_command mmc_cmd;
1072 struct mmc_data mmc_dat;
1073 #ifdef BCMSDIOH_TXGLOM
1074 uint8 *localbuf = NULL;
1075 uint local_plen = 0;
1076 bool need_txglom = write && sdioh_glom_enabled() &&
1077 (pkt == sd->glom_info.glom_pkt_tail) &&
1078 (sd->glom_info.glom_pkt_head != sd->glom_info.glom_pkt_tail);
1079 #endif /* BCMSDIOH_TXGLOM */
1080
1081 sd_trace(("%s: Enter\n", __FUNCTION__));
1082
1083 ASSERT(pkt);
1084 DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
1085 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1086
1087 ttl_len = xfred_len = 0;
1088 #ifdef BCMSDIOH_TXGLOM
1089 if (need_txglom) {
1090 pkt = sd->glom_info.glom_pkt_head;
1091 }
1092 #endif /* BCMSDIOH_TXGLOM */
1093
1094 /* at least 4-byte alignment of the skb buffer is guaranteed */
1095 for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext))
1096 ttl_len += PKTLEN(sd->osh, pnext);
1097
1098 blk_size = sd->client_block_size[func];
1099 if (((!write && sd->use_rxchain) ||
1100 #ifdef BCMSDIOH_TXGLOM
1101 (need_txglom && sd->txglom_mode == SDPCM_TXGLOM_MDESC) ||
1102 #endif
1103 0) && (ttl_len >= blk_size)) {
1104 blk_num = ttl_len / blk_size;
1105 dma_len = blk_num * blk_size;
1106 } else {
1107 blk_num = 0;
1108 dma_len = 0;
1109 }
1110
1111 lft_len = ttl_len - dma_len;
1112
1113 sd_trace(("%s: %s %dB to func%d:%08x, %d blks with DMA, %dB leftover\n",
1114 __FUNCTION__, write ? "W" : "R",
1115 ttl_len, func, addr, blk_num, lft_len));
1116
1117 if (0 != dma_len) {
1118 memset(&mmc_req, 0, sizeof(struct mmc_request));
1119 memset(&mmc_cmd, 0, sizeof(struct mmc_command));
1120 memset(&mmc_dat, 0, sizeof(struct mmc_data));
1121
1122 /* Set up DMA descriptors */
1123 for (pnext = pkt;
1124 pnext && dma_len;
1125 pnext = PKTNEXT(sd->osh, pnext)) {
1126 pkt_len = PKTLEN(sd->osh, pnext);
1127
1128 if (dma_len > pkt_len)
1129 dma_len -= pkt_len;
1130 else {
1131 pkt_len = xfred_len = dma_len;
1132 dma_len = 0;
1133 pkt = pnext;
1134 }
1135
1136 sg_set_buf(&sd->sg_list[SGCount++],
1137 (uint8*)PKTDATA(sd->osh, pnext),
1138 pkt_len);
1139
1140 if (SGCount >= SDIOH_SDMMC_MAX_SG_ENTRIES) {
1141 sd_err(("%s: sg list entries exceed limit\n",
1142 __FUNCTION__));
1143 return (SDIOH_API_RC_FAIL);
1144 }
1145 }
1146
1147 mmc_dat.sg = sd->sg_list;
1148 mmc_dat.sg_len = SGCount;
1149 mmc_dat.blksz = blk_size;
1150 mmc_dat.blocks = blk_num;
1151 mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
1152
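		/*
		 * Hand-build the CMD53 (SD_IO_RW_EXTENDED) argument:
		 *   bit 31     R/W flag (1 = write)
		 *   bits 30:28 function number
		 *   bit 27     block mode
		 *   bit 26     OP code (1 = incrementing address, 0 = fixed/FIFO)
		 *   bits 25:9  register address
		 *   bits 8:0   block count (block mode) / byte count
		 */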
1153 mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
1154 mmc_cmd.arg = write ? 1<<31 : 0;
1155 mmc_cmd.arg |= (func & 0x7) << 28;
1156 mmc_cmd.arg |= 1<<27;
1157 mmc_cmd.arg |= fifo ? 0 : 1<<26;
1158 mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
1159 mmc_cmd.arg |= blk_num & 0x1FF;
1160 mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
1161
1162 mmc_req.cmd = &mmc_cmd;
1163 mmc_req.data = &mmc_dat;
1164
1165 sdio_claim_host(gInstance->func[func]);
1166 mmc_set_data_timeout(&mmc_dat, gInstance->func[func]->card);
1167 mmc_wait_for_req(gInstance->func[func]->card->host, &mmc_req);
1168 sdio_release_host(gInstance->func[func]);
1169
1170 err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
1171 if (0 != err_ret) {
1172 sd_err(("%s:CMD53 %s failed with code %d\n",
1173 __FUNCTION__,
1174 write ? "write" : "read",
1175 err_ret));
1176 }
1177 if (!fifo) {
1178 addr = addr + ttl_len - lft_len - dma_len;
1179 }
1180 }
1181
1182 /* PIO mode */
1183 if (0 != lft_len) {
1184 /* Claim host controller */
1185 sdio_claim_host(gInstance->func[func]);
1186 for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
1187 uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext) +
1188 xfred_len;
1189 uint pad = 0;
1190 pkt_len = PKTLEN(sd->osh, pnext);
1191 if (0 != xfred_len) {
1192 pkt_len -= xfred_len;
1193 xfred_len = 0;
1194 }
1195 #ifdef BCMSDIOH_TXGLOM
1196 if (need_txglom) {
1197 if (!localbuf) {
1198 uint prev_lft_len = lft_len;
1199 lft_len = sdioh_request_packet_align(lft_len, write,
1200 func, blk_size);
1201
1202 if (lft_len > prev_lft_len) {
1203 sd_err(("%s: padding is unexpected! lft_len %d,"
1204 " prev_lft_len %d %s\n",
1205 __FUNCTION__, lft_len, prev_lft_len,
1206 write ? "Write" : "Read"));
1207 }
1208
1209 localbuf = (uint8 *)MALLOC(sd->osh, lft_len);
1210 if (localbuf == NULL) {
1211 sd_err(("%s: %s TXGLOM: localbuf malloc FAILED\n",
1212 __FUNCTION__, (write) ? "TX" : "RX"));
1213 need_txglom = FALSE;
1214 goto txglomfail;
1215 }
1216 }
1217 bcopy(buf, (localbuf + local_plen), pkt_len);
1218 local_plen += pkt_len;
1219
1220 if (PKTNEXT(sd->osh, pnext)) {
1221 continue;
1222 }
1223
1224 buf = localbuf;
1225 pkt_len = local_plen;
1226 }
1227
1228 txglomfail:
1229 #endif /* BCMSDIOH_TXGLOM */
1230
1231 if (
1232 #ifdef BCMSDIOH_TXGLOM
1233 !need_txglom &&
1234 #endif
1235 TRUE) {
1236 pkt_len = sdioh_request_packet_align(pkt_len, write,
1237 func, blk_size);
1238
1239 pad = pkt_len - PKTLEN(sd->osh, pnext);
1240
1241 if (pad > 0) {
1242 if (func == SDIO_FUNC_2) {
1243 sd_err(("%s: padding is unexpected! pkt_len %d,"
1244 " PKTLEN %d lft_len %d %s\n",
1245 __FUNCTION__, pkt_len, PKTLEN(sd->osh, pnext),
1246 lft_len, write ? "Write" : "Read"));
1247 }
1248 if (PKTTAILROOM(sd->osh, pkt) < pad) {
1249 sd_info(("%s: insufficient tailroom %d, pad %d,"
1250 " lft_len %d pktlen %d, func %d %s\n",
1251 __FUNCTION__, (int)PKTTAILROOM(sd->osh, pkt),
1252 pad, lft_len, PKTLEN(sd->osh, pnext), func,
1253 write ? "W" : "R"));
1254 if (PKTPADTAILROOM(sd->osh, pkt, pad)) {
1255 sd_err(("%s: padding error size %d.\n",
1256 __FUNCTION__, pad));
1257 return SDIOH_API_RC_FAIL;
1258 }
1259 }
1260 }
1261 }
1262
1263 if ((write) && (!fifo))
1264 err_ret = sdio_memcpy_toio(
1265 gInstance->func[func],
1266 addr, buf, pkt_len);
1267 else if (write)
1268 err_ret = sdio_memcpy_toio(
1269 gInstance->func[func],
1270 addr, buf, pkt_len);
1271 else if (fifo)
1272 err_ret = sdio_readsb(
1273 gInstance->func[func],
1274 buf, addr, pkt_len);
1275 else
1276 err_ret = sdio_memcpy_fromio(
1277 gInstance->func[func],
1278 buf, addr, pkt_len);
1279
1280 if (err_ret)
1281 sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
1282 __FUNCTION__,
1283 (write) ? "TX" : "RX",
1284 pnext, SGCount, addr, pkt_len, err_ret));
1285 else
1286 sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
1287 __FUNCTION__,
1288 (write) ? "TX" : "RX",
1289 pnext, SGCount, addr, pkt_len));
1290
1291 if (!fifo)
1292 addr += pkt_len;
1293 SGCount ++;
1294 }
1295 sdio_release_host(gInstance->func[func]);
1296 }
1297 #ifdef BCMSDIOH_TXGLOM
1298 if (localbuf)
1299 MFREE(sd->osh, localbuf, lft_len);
1300 #endif /* BCMSDIOH_TXGLOM */
1301
1302 sd_trace(("%s: Exit\n", __FUNCTION__));
1303 return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1304 }
1305
1306
1307 /*
1308 * This function takes a buffer or packet, and fixes everything up so that in the
1309 * end, a DMA-able packet is created.
1310 *
1311 * A buffer does not have an associated packet pointer, and may or may not be aligned.
1312 * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
1313 * then all the packets in the chain must be properly aligned. If the packet data is not
1314 * aligned, then there may only be one packet, and in this case, it is copied to a new
1315 * aligned packet.
1316 *
1317 */
1318 extern SDIOH_API_RC
1319 sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
1320 uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
1321 {
1322 SDIOH_API_RC Status;
1323 void *tmppkt;
1324 void *orig_buf = NULL;
1325 uint copylen = 0;
1326
1327 sd_trace(("%s: Enter\n", __FUNCTION__));
1328
1329 DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
1330 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1331
1332 if (pkt == NULL) {
1333 /* Case 1: we don't have a packet. */
1334 orig_buf = buffer;
1335 copylen = buflen_u;
1336 } else if ((ulong)PKTDATA(sd->osh, pkt) & DMA_ALIGN_MASK) {
1337 /* Case 2: We have a packet, but it is unaligned.
1338 * in this case, we cannot have a chain.
1339 */
1340 ASSERT(PKTNEXT(sd->osh, pkt) == NULL);
1341
1342 orig_buf = PKTDATA(sd->osh, pkt);
1343 copylen = PKTLEN(sd->osh, pkt);
1344 }
1345
1346 tmppkt = pkt;
1347 if (copylen) {
1348 tmppkt = PKTGET_STATIC(sd->osh, copylen, write ? TRUE : FALSE);
1349 if (tmppkt == NULL) {
1350 sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, copylen));
1351 return SDIOH_API_RC_FAIL;
1352 }
1353 /* For a write, copy the buffer data into the packet. */
1354 if (write)
1355 bcopy(orig_buf, PKTDATA(sd->osh, tmppkt), copylen);
1356 }
1357
1358 Status = sdioh_request_packet(sd, fix_inc, write, func, addr, tmppkt);
1359
1360 if (copylen) {
1361 /* For a read, copy the packet data back to the buffer. */
1362 if (!write)
1363 bcopy(PKTDATA(sd->osh, tmppkt), orig_buf, PKTLEN(sd->osh, tmppkt));
1364 PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
1365 }
1366
1367 return (Status);
1368 }
1369
1370 /* this function performs "abort" for both of host & device */
1371 extern int
1372 sdioh_abort(sdioh_info_t *sd, uint func)
1373 {
1374 #if defined(MMC_SDIO_ABORT)
1375 char t_func = (char) func;
1376 #endif /* defined(MMC_SDIO_ABORT) */
1377 sd_trace(("%s: Enter\n", __FUNCTION__));
1378
1379 #if defined(MMC_SDIO_ABORT)
1380 /* issue abort cmd52 command through F1 */
1381 sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
1382 #endif /* defined(MMC_SDIO_ABORT) */
1383
1384 sd_trace(("%s: Exit\n", __FUNCTION__));
1385 return SDIOH_API_RC_SUCCESS;
1386 }
1387
1388 /* Reset and re-initialize the device */
1389 int sdioh_sdio_reset(sdioh_info_t *si)
1390 {
1391 sd_trace(("%s: Enter\n", __FUNCTION__));
1392 sd_trace(("%s: Exit\n", __FUNCTION__));
1393 return SDIOH_API_RC_SUCCESS;
1394 }
1395
1396 /* Disable device interrupt */
1397 void
1398 sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
1399 {
1400 sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1401 sd->intmask &= ~CLIENT_INTR;
1402 }
1403
1404 /* Enable device interrupt */
1405 void
1406 sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
1407 {
1408 sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1409 sd->intmask |= CLIENT_INTR;
1410 }
1411
1412 /* Read client card reg */
1413 int
1414 sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
1415 {
1416
1417 if ((func == 0) || (regsize == 1)) {
1418 uint8 temp = 0;
1419
1420 sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
1421 *data = temp;
1422 *data &= 0xff;
1423 sd_data(("%s: byte read data=0x%02x\n",
1424 __FUNCTION__, *data));
1425 } else {
1426 sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
1427 if (regsize == 2)
1428 *data &= 0xffff;
1429
1430 sd_data(("%s: word read data=0x%08x\n",
1431 __FUNCTION__, *data));
1432 }
1433
1434 return SUCCESS;
1435 }
1436
1437 #if !defined(OOB_INTR_ONLY)
1438 /* bcmsdh_sdmmc interrupt handler */
1439 static void IRQHandler(struct sdio_func *func)
1440 {
1441 sdioh_info_t *sd;
1442
1443 sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n"));
1444 sd = gInstance->sd;
1445
1446 ASSERT(sd != NULL);
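	/*
	 * The MMC core calls SDIO interrupt handlers with the host already
	 * claimed; release it here so the DHD handler can issue its own SDIO
	 * transactions, then re-claim it before returning to the core.
	 */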
1447 sdio_release_host(gInstance->func[0]);
1448
1449 if (sd->use_client_ints) {
1450 sd->intrcount++;
1451 ASSERT(sd->intr_handler);
1452 ASSERT(sd->intr_handler_arg);
1453 (sd->intr_handler)(sd->intr_handler_arg);
1454 } else {
1455 sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
1456
1457 sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
1458 __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
1459 }
1460
1461 sdio_claim_host(gInstance->func[0]);
1462 }
1463
1464 /* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
1465 static void IRQHandlerF2(struct sdio_func *func)
1466 {
1467 sdioh_info_t *sd;
1468
1469 sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
1470
1471 sd = gInstance->sd;
1472
1473 ASSERT(sd != NULL);
1474 BCM_REFERENCE(sd);
1475 }
1476 #endif /* !defined(OOB_INTR_ONLY) */
1477
1478 #ifdef NOTUSED
1479 /* Write client card reg */
1480 static int
1481 sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
1482 {
1483
1484 if ((func == 0) || (regsize == 1)) {
1485 uint8 temp;
1486
1487 temp = data & 0xff;
1488 sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
1489 sd_data(("%s: byte write data=0x%02x\n",
1490 __FUNCTION__, data));
1491 } else {
1492 if (regsize == 2)
1493 data &= 0xffff;
1494
1495 sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
1496
1497 sd_data(("%s: word write data=0x%08x\n",
1498 __FUNCTION__, data));
1499 }
1500
1501 return SUCCESS;
1502 }
1503 #endif /* NOTUSED */
1504
1505 int
1506 sdioh_start(sdioh_info_t *si, int stage)
1507 {
1508 sdioh_info_t *sd = gInstance->sd;
1509
1510 if (!sd) {
1511 sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
1512 return (0);
1513 }
1514
1515 /* This has to be done in stages: the interrupt cannot be enabled until
1516 the firmware download is complete, otherwise polled SDIO accesses
1517 would get in the way.
1518 */
1519 if (gInstance->func[0]) {
1520 if (stage == 0) {
1521 /* Since power to the chip was cut, the device has to be
1522 re-enumerated. Set the block sizes and enable
1523 function 1 in preparation for downloading
1524 the firmware.
1525 */
1526 /* sdio_reset_comm() has been fixed in the latest kernel/msm.git for Linux
1527 2.6.27. Implementations prior to that are buggy and need Broadcom's
1528 patch.
1529 */
1530 /*
1531 if ((ret = sdio_reset_comm(gInstance->func[0]->card))) {
1532 sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
1533 return ret;
1534 }
1535 else */
1536 {
1537 sd->num_funcs = 2;
1538 sd->sd_blockmode = TRUE;
1539 sd->use_client_ints = TRUE;
1540 sd->client_block_size[0] = 64;
1541
1542 if (gInstance->func[1]) {
1543 /* Claim host controller */
1544 sdio_claim_host(gInstance->func[1]);
1545
1546 sd->client_block_size[1] = 64;
1547 if (sdio_set_block_size(gInstance->func[1], 64)) {
1548 sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
1549 }
1550
1551 /* Release host controller F1 */
1552 sdio_release_host(gInstance->func[1]);
1553 }
1554
1555 if (gInstance->func[2]) {
1556 /* Claim host controller F2 */
1557 sdio_claim_host(gInstance->func[2]);
1558
1559 sd->client_block_size[2] = sd_f2_blocksize;
1560 if (sdio_set_block_size(gInstance->func[2],
1561 sd_f2_blocksize)) {
1562 sd_err(("bcmsdh_sdmmc: Failed to set F2 "
1563 "blocksize to %d\n", sd_f2_blocksize));
1564 }
1565
1566 /* Release host controller F2 */
1567 sdio_release_host(gInstance->func[2]);
1568 }
1569
1570 sdioh_sdmmc_card_enablefuncs(sd);
1571 }
1572 } else {
1573 #if !defined(OOB_INTR_ONLY)
1574 sdio_claim_host(gInstance->func[0]);
1575 if (gInstance->func[2])
1576 sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
1577 if (gInstance->func[1])
1578 sdio_claim_irq(gInstance->func[1], IRQHandler);
1579 sdio_release_host(gInstance->func[0]);
1580 #else /* defined(OOB_INTR_ONLY) */
1581 #if defined(HW_OOB)
1582 sdioh_enable_func_intr();
1583 #endif
1584 bcmsdh_oob_intr_set(TRUE);
1585 #endif /* !defined(OOB_INTR_ONLY) */
1586 }
1587 }
1588 else
1589 sd_err(("%s Failed\n", __FUNCTION__));
1590
1591 return (0);
1592 }
1593
1594 int
1595 sdioh_stop(sdioh_info_t *si)
1596 {
1597 /* The MSM7201A Android SDIO stack has a bug in its
1598 interrupt handling: internally the stack polls,
1599 which causes problems when the device is turned off.
1600 Unregister the interrupt with the SDIO stack to
1601 stop the polling.
1602 */
1603 if (gInstance->func[0]) {
1604 #if !defined(OOB_INTR_ONLY)
1605 sdio_claim_host(gInstance->func[0]);
1606 if (gInstance->func[1])
1607 sdio_release_irq(gInstance->func[1]);
1608 if (gInstance->func[2])
1609 sdio_release_irq(gInstance->func[2]);
1610 sdio_release_host(gInstance->func[0]);
1611 #else /* defined(OOB_INTR_ONLY) */
1612 #if defined(HW_OOB)
1613 sdioh_disable_func_intr();
1614 #endif
1615 bcmsdh_oob_intr_set(FALSE);
1616 #endif /* !defined(OOB_INTR_ONLY) */
1617 }
1618 else
1619 sd_err(("%s Failed\n", __FUNCTION__));
1620 return (0);
1621 }
1622
1623 int
1624 sdioh_waitlockfree(sdioh_info_t *sd)
1625 {
1626 return (1);
1627 }
1628
1629
1630 SDIOH_API_RC
1631 sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
1632 {
1633 return SDIOH_API_RC_FAIL;
1634 }
1635
1636 SDIOH_API_RC
1637 sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
1638 {
1639 return SDIOH_API_RC_FAIL;
1640 }
1641
1642 bool
1643 sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
1644 {
1645 return FALSE;
1646 }
1647
1648 SDIOH_API_RC
1649 sdioh_gpio_init(sdioh_info_t *sd)
1650 {
1651 return SDIOH_API_RC_FAIL;
1652 }