/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author: Andrew Christian
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */

#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
				  (req->cmd_flags & REQ_META)) && \
				  (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	name_idx;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with mmc_set_drvdata, and keeps
	 * track of the currently selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};
static DEFINE_MUTEX(open_lock);

enum {
	MMC_PACKED_NR_IDX = -1,
	MMC_PACKED_NR_ZERO,
	MMC_PACKED_NR_SINGLE,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
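
/*
 * Reset the packed-command bookkeeping on a queue request so that a
 * stale entry count or failure index never leaks into the next transfer.
 */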
static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
	struct mmc_packed *packed = mqrq->packed;

	mqrq->cmd_type = MMC_PACKED_NONE;
	packed->nr_entries = MMC_PACKED_NR_ZERO;
	packed->idx_failure = MMC_PACKED_NR_IDX;
}
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}
static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devmaj = MAJOR(disk_devt(disk));
	int devidx = MINOR(disk_devt(disk)) / perdev_minors;

	if (!devmaj)
		devidx = disk->first_minor / perdev_minors;

	return devidx;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}
static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	return ret;
}
static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;

	mmc_claim_host(card->host);

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				card->ext_csd.boot_ro_lock |
				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

	mmc_release_host(card->host);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}
static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes)
		return idata;

	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}
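
/*
 * An RPMB frame is programmed asynchronously by the device, so poll
 * CMD13 until the card reports it has left the programming state (or
 * the retry budget runs out) before declaring the operation complete.
 */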
static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = get_card_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}
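
/*
 * Handler for the MMC_IOC_CMD ioctl: copy the user-supplied command
 * descriptor and data buffer in, issue the raw command to the card,
 * then copy the response (and any read data) back out to user space.
 */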
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
	struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;
	int is_rpmb = false;
	u32 status = 0;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	mmc_claim_host(card->host);

	err = mmc_blk_part_switch(card, md);
	if (err)
		goto cmd_rel_host;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			goto cmd_rel_host;
	}

	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
			idata->ic.write_flag & (1 << 31));
		if (err)
			goto cmd_rel_host;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
						__func__, cmd.error);
		err = cmd.error;
		goto cmd_rel_host;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
						__func__, data.error);
		err = data.error;
		goto cmd_rel_host;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
		err = -EFAULT;
		goto cmd_rel_host;
	}

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
						idata->buf, idata->buf_bytes)) {
			err = -EFAULT;
			goto cmd_rel_host;
		}
	}

	if (is_rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
					"%s: Card Status=0x%08X, error %d\n",
					__func__, status, err);
	}

cmd_rel_host:
	mmc_release_host(card->host);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return err;
}
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	int ret = -EINVAL;
	if (cmd == MMC_IOC_CMD)
		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
	return ret;
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif
static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = mmc_get_drvdata(card);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret)
			return ret;

		card->ext_csd.part_config = part_config;
	}

	main_md->part_curr = md->part_type;
	return 0;
}
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}
static int send_stop(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}
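
/*
 * Fetch the card status register with CMD13 (SEND_STATUS).  In SPI
 * mode the card has no RCA, so no argument is needed.
 */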
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}
#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0
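
/*
 * Map a host-reported command error onto one of the ERR_* actions
 * above, consulting the card status word when it is known to be valid.
 */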
static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid)
			return ERR_RETRY;

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
			return ERR_RETRY;

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}
/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;

	/* Flag General errors */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
		if ((status & R1_ERROR) ||
		    (brq->stop.resp[0] & R1_ERROR)) {
			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0], status);
			*gen_err = 1;
		}

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card, &stop_status);
		if (err)
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);

		/*
		 * If the stop cmd also timed out, the card is probably
		 * not present, so abort.  Other errors are bad news too.
		 */
		if (err)
			return ERR_ABORT;
		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
			if (stop_status & R1_ERROR) {
				pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
				       req->rq_disk->disk_name, __func__,
				       stop_status);
				*gen_err = 1;
			}
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}
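
/*
 * mmc_blk_reset() performs at most one hardware reset per request type
 * until mmc_blk_reset_success() clears the flag again; this prevents a
 * persistently failing card from trapping us in an endless reset loop.
 */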
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}
static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}
*mq
,
957 struct mmc_blk_data
*md
= mq
->data
;
958 struct mmc_card
*card
= md
->queue
.card
;
959 unsigned int from
, nr
, arg
, trim_arg
, erase_arg
;
960 int err
= 0, type
= MMC_BLK_SECDISCARD
;
962 if (!(mmc_can_secure_erase_trim(card
) || mmc_can_sanitize(card
))) {
967 from
= blk_rq_pos(req
);
968 nr
= blk_rq_sectors(req
);
970 /* The sanitize operation is supported at v4.5 only */
971 if (mmc_can_sanitize(card
)) {
972 erase_arg
= MMC_ERASE_ARG
;
973 trim_arg
= MMC_TRIM_ARG
;
975 erase_arg
= MMC_SECURE_ERASE_ARG
;
976 trim_arg
= MMC_SECURE_TRIM1_ARG
;
979 if (mmc_erase_group_aligned(card
, from
, nr
))
981 else if (mmc_can_trim(card
))
988 if (card
->quirks
& MMC_QUIRK_INAND_CMD38
) {
989 err
= mmc_switch(card
, EXT_CSD_CMD_SET_NORMAL
,
990 INAND_CMD38_ARG_EXT_CSD
,
991 arg
== MMC_SECURE_TRIM1_ARG
?
992 INAND_CMD38_ARG_SECTRIM1
:
993 INAND_CMD38_ARG_SECERASE
,
999 err
= mmc_erase(card
, from
, nr
, arg
);
1005 if (arg
== MMC_SECURE_TRIM1_ARG
) {
1006 if (card
->quirks
& MMC_QUIRK_INAND_CMD38
) {
1007 err
= mmc_switch(card
, EXT_CSD_CMD_SET_NORMAL
,
1008 INAND_CMD38_ARG_EXT_CSD
,
1009 INAND_CMD38_ARG_SECTRIM2
,
1015 err
= mmc_erase(card
, from
, nr
, MMC_SECURE_TRIM2_ARG
);
1022 if (mmc_can_sanitize(card
))
1023 err
= mmc_switch(card
, EXT_CSD_CMD_SET_NORMAL
,
1024 EXT_CSD_SANITIZE_START
, 1, 0);
1026 if (err
&& !mmc_blk_reset(md
, card
->host
, type
))
1029 mmc_blk_reset_success(md
, type
);
1031 blk_end_request(req
, err
, blk_rq_bytes(req
));
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	blk_end_request_all(req, ret);

	return ret ? 0 : 1;
}
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}
#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */
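
/*
 * Post-completion check run from the async request machinery: inspect
 * the sbc/cmd/stop/data error fields and the card status, and map the
 * outcome onto an MMC_BLK_* status (SUCCESS, PARTIAL, RETRY, ABORT...).
 */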
static int mmc_blk_err_check(struct mmc_card *card,
			     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int ecc_err = 0, gen_err = 0;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, and we have to wait for that to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		u32 status;
		unsigned long timeout;

		/* Check stop command response */
		if (brq->stop.resp[0] & R1_ERROR) {
			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0]);
			gen_err = 1;
		}

		timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
		do {
			int err = get_card_status(card, &status, 5);
			if (err) {
				pr_err("%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
				return MMC_BLK_CMD_ERR;
			}

			if (status & R1_ERROR) {
				pr_err("%s: %s: general error sending status command, card status %#x\n",
				       req->rq_disk->disk_name, __func__,
				       status);
				gen_err = 1;
			}

			/* Timeout if the device never becomes ready for data
			 * and never leaves the program state.
			 */
			if (time_after(jiffies, timeout)) {
				pr_err("%s: Card stuck in programming state!"\
					" %s %s\n", mmc_hostname(card->host),
					req->rq_disk->disk_name, __func__);

				return MMC_BLK_CMD_ERR;
			}
			/*
			 * Some cards mishandle the status bits,
			 * so make sure to check both the busy
			 * indication and the card state.
			 */
		} while (!(status & R1_READY_FOR_DATA) ||
			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
	}

	/* if general error occurs, retry the write operation. */
	if (gen_err) {
		pr_warn("%s: retrying write for general error\n",
			req->rq_disk->disk_name);
		return MMC_BLK_RETRY;
	}

	if (brq->data.error) {
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
			return MMC_BLK_PARTIAL;
		else
			return MMC_BLK_SUCCESS;
	}

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}
static int mmc_blk_packed_err_check(struct mmc_card *card,
				    struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
			mmc_active);
	struct request *req = mq_rq->req;
	struct mmc_packed *packed = mq_rq->packed;
	int err, check, status;
	u8 *ext_csd;

	packed->retries--;
	check = mmc_blk_err_check(card, areq);
	err = get_card_status(card, &status, 0);
	if (err) {
		pr_err("%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
		return MMC_BLK_ABORT;
	}

	if (status & R1_EXCEPTION_EVENT) {
		ext_csd = kzalloc(512, GFP_KERNEL);
		if (!ext_csd) {
			pr_err("%s: unable to allocate buffer for ext_csd\n",
			       req->rq_disk->disk_name);
			return -ENOMEM;
		}

		err = mmc_send_ext_csd(card, ext_csd);
		if (err) {
			pr_err("%s: error %d sending ext_csd\n",
			       req->rq_disk->disk_name, err);
			check = MMC_BLK_ABORT;
			goto free;
		}

		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
		     EXT_CSD_PACKED_FAILURE) &&
		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
		     EXT_CSD_PACKED_GENERIC_ERROR)) {
			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
			    EXT_CSD_PACKED_INDEXED_ERROR) {
				packed->idx_failure =
				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
				check = MMC_BLK_PARTIAL;
			}
			pr_err("%s: packed cmd failed, nr %u, sectors %u, "
			       "failure index: %d\n",
			       req->rq_disk->disk_name, packed->nr_entries,
			       packed->blocks, packed->idx_failure);
		}
free:
		kfree(ext_csd);
	}

	return check;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->data;
	bool do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * REQ_META accesses, and are supported only on MMCs.
	 *
	 * XXX: this really needs a good explanation of why REQ_META
	 * is treated special.
	 */
	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/* Some controllers can't do multiblock reads due to hw bugs */
		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
		    rq_data_dir(req) == READ)
			brq->data.blocks = 1;
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags |= MMC_DATA_READ;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags |= MMC_DATA_WRITE;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		(req->cmd_flags & REQ_META) &&
		(rq_data_dir(req) == WRITE) &&
		((brq->data.blocks * brq->data.blksz) >=
		 card->ext_csd.data_tag_unit_size);

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open-ended ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}
static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
					  struct mmc_card *card)
{
	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
	unsigned int max_seg_sz = queue_max_segment_size(q);
	unsigned int len, nr_segs = 0;

	do {
		len = min(hdr_sz, max_seg_sz);
		hdr_sz -= len;
		nr_segs++;
	} while (hdr_sz);

	return nr_segs;
}
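
/*
 * Try to collect further queued requests into one packed write: keep
 * fetching requests while they have the same direction and still fit
 * within the sector, segment and entry-count limits, and requeue the
 * first one that does not fit.  Returns the number of packed entries,
 * or 0 if the request should be issued on its own.
 */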
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;
	struct request *cur = req, *next = NULL;
	struct mmc_blk_data *md = mq->data;
	struct mmc_queue_req *mqrq = mq->mqrq_cur;
	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
	unsigned int req_sectors = 0, phys_segments = 0;
	unsigned int max_blk_count, max_phys_segs;
	bool put_back = true;
	u8 max_packed_rw = 0;
	u8 reqs = 0;

	if (!(md->flags & MMC_BLK_PACKED_CMD))
		goto no_packed;

	if ((rq_data_dir(cur) == WRITE) &&
	    mmc_host_packed_wr(card->host))
		max_packed_rw = card->ext_csd.max_packed_writes;

	if (max_packed_rw == 0)
		goto no_packed;

	if (mmc_req_rel_wr(cur) &&
	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
		goto no_packed;

	if (mmc_large_sector(card) &&
	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
		goto no_packed;

	mmc_blk_clear_packed(mqrq);

	max_blk_count = min(card->host->max_blk_count,
			    card->host->max_req_size >> 9);
	if (unlikely(max_blk_count > 0xffff))
		max_blk_count = 0xffff;

	max_phys_segs = queue_max_segments(q);
	req_sectors += blk_rq_sectors(cur);
	phys_segments += cur->nr_phys_segments;

	if (rq_data_dir(cur) == WRITE) {
		req_sectors += mmc_large_sector(card) ? 8 : 1;
		phys_segments += mmc_calc_packed_hdr_segs(q, card);
	}

	do {
		if (reqs >= max_packed_rw - 1) {
			put_back = false;
			break;
		}

		spin_lock_irq(q->queue_lock);
		next = blk_fetch_request(q);
		spin_unlock_irq(q->queue_lock);
		if (!next) {
			put_back = false;
			break;
		}

		if (mmc_large_sector(card) &&
		    !IS_ALIGNED(blk_rq_sectors(next), 8))
			break;

		if (next->cmd_flags & REQ_DISCARD ||
		    next->cmd_flags & REQ_FLUSH)
			break;

		if (rq_data_dir(cur) != rq_data_dir(next))
			break;

		if (mmc_req_rel_wr(next) &&
		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
			break;

		req_sectors += blk_rq_sectors(next);
		if (req_sectors > max_blk_count)
			break;

		phys_segments += next->nr_phys_segments;
		if (phys_segments > max_phys_segs)
			break;

		list_add_tail(&next->queuelist, &mqrq->packed->list);
		cur = next;
		reqs++;
	} while (1);

	if (put_back) {
		spin_lock_irq(q->queue_lock);
		blk_requeue_request(q, next);
		spin_unlock_irq(q->queue_lock);
	}

	if (reqs > 0) {
		list_add(&req->queuelist, &mqrq->packed->list);
		mqrq->packed->nr_entries = ++reqs;
		mqrq->packed->retries = reqs;
		return reqs;
	}

no_packed:
	mqrq->cmd_type = MMC_PACKED_NONE;
	return 0;
}
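
/*
 * Build the packed WRITE header block: word 0 carries the version, the
 * packed type and the entry count, and each following pair of words
 * holds the CMD23 argument and the start address (the CMD25 argument)
 * for one packed entry.
 */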
static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
					struct mmc_card *card,
					struct mmc_queue *mq)
{
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct request *prq;
	struct mmc_blk_data *md = mq->data;
	struct mmc_packed *packed = mqrq->packed;
	bool do_rel_wr, do_data_tag;
	u32 *packed_cmd_hdr;
	u8 hdr_blocks;
	u8 i = 1;

	mqrq->cmd_type = MMC_PACKED_WRITE;
	packed->blocks = 0;
	packed->idx_failure = MMC_PACKED_NR_IDX;

	packed_cmd_hdr = packed->cmd_hdr;
	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
	hdr_blocks = mmc_large_sector(card) ? 8 : 1;

	/*
	 * Argument for each entry of packed group
	 */
	list_for_each_entry(prq, &packed->list, queuelist) {
		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
			(prq->cmd_flags & REQ_META) &&
			(rq_data_dir(prq) == WRITE) &&
			((brq->data.blocks * brq->data.blksz) >=
			 card->ext_csd.data_tag_unit_size);
		/* Argument of CMD23 */
		packed_cmd_hdr[(i * 2)] =
			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
			blk_rq_sectors(prq);
		/* Argument of CMD18 or CMD25 */
		packed_cmd_hdr[((i * 2)) + 1] =
			mmc_card_blockaddr(card) ?
			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
		packed->blocks += blk_rq_sectors(prq);
		i++;
	}

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;
	brq->mrq.sbc = &brq->sbc;
	brq->mrq.stop = &brq->stop;

	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	brq->data.blksz = 512;
	brq->data.blocks = packed->blocks + hdr_blocks;
	brq->data.flags |= MMC_DATA_WRITE;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;

	mmc_queue_bounce_pre(mqrq);
}
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			   struct mmc_blk_request *brq, struct request *req,
			   int ret)
{
	struct mmc_queue_req *mq_rq;
	mq_rq = container_of(brq, struct mmc_queue_req, brq);

	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			ret = blk_end_request(req, 0, blocks << 9);
		}
	} else {
		if (!mmc_packed_cmd(mq_rq->cmd_type))
			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
	}
	return ret;
}
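
/*
 * Complete a packed request: entries before the failure index are
 * acknowledged as transferred; if an indexed failure was reported, the
 * request is rewound so that the failed entry is retried first.
 */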
static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;
	int idx = packed->idx_failure, i = 0;
	int ret = 0;

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		if (idx == i) {
			/* retry from error index */
			packed->nr_entries -= idx;
			mq_rq->req = prq;
			ret = 1;

			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
				list_del_init(&prq->queuelist);
				mmc_blk_clear_packed(mq_rq);
			}
			return ret;
		}
		list_del_init(&prq->queuelist);
		blk_end_request(prq, 0, blk_rq_bytes(prq));
		i++;
	}

	mmc_blk_clear_packed(mq_rq);
	return ret;
}
static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		list_del_init(&prq->queuelist);
		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
	}

	mmc_blk_clear_packed(mq_rq);
}
static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
				      struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct request_queue *q = mq->queue;
	struct mmc_packed *packed = mq_rq->packed;

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.prev);
		if (prq->queuelist.prev != &packed->list) {
			list_del_init(&prq->queuelist);
			spin_lock_irq(q->queue_lock);
			blk_requeue_request(mq->queue, prq);
			spin_unlock_irq(q->queue_lock);
		} else {
			list_del_init(&prq->queuelist);
		}
	}

	mmc_blk_clear_packed(mq_rq);
}
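
/*
 * Main read/write issue path.  The current request is prepared and
 * handed to mmc_start_req(), which completes the previously started
 * request at the same time, giving double-buffered (asynchronous)
 * transfers; the completion status then drives the retry/abort logic.
 */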
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
	int ret = 1, disable_multi = 0, retry = 0, type;
	enum mmc_blk_status status;
	struct mmc_queue_req *mq_rq;
	struct request *req = rqc;
	struct mmc_async_req *areq;
	const u8 packed_nr = 2;
	u8 reqs = 0;

	if (!rqc && !mq->mqrq_prev->req)
		return 0;

	if (rqc)
		reqs = mmc_blk_prep_packed_list(mq, rqc);

	do {
		if (rqc) {
			/*
			 * When 4KB native sector is enabled, only 8 blocks
			 * multiple read or write is allowed
			 */
			if ((brq->data.blocks & 0x07) &&
			    (card->ext_csd.data_sector_size == 4096)) {
				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
					req->rq_disk->disk_name);
				mq_rq = mq->mqrq_cur;
				goto cmd_abort;
			}

			if (reqs >= packed_nr)
				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
							    card, mq);
			else
				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			areq = &mq->mqrq_cur->mmc_active;
		} else
			areq = NULL;
		areq = mmc_start_req(card->host, areq, (int *) &status);
		if (!areq) {
			if (status == MMC_BLK_NEW_REQUEST)
				mq->flags |= MMC_QUEUE_NEW_REQUEST;
			return 0;
		}

		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
		brq = &mq_rq->brq;
		req = mq_rq->req;
		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);

			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				ret = mmc_blk_end_packed_req(mq_rq);
				break;
			} else {
				ret = blk_end_request(req, 0,
						brq->data.bytes_xfered);
			}

			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && ret) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(req),
				       brq->data.bytes_xfered);
				rqc = NULL;
				goto cmd_abort;
			}
			break;
		case MMC_BLK_CMD_ERR:
			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_RETRY:
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV ||
				mmc_packed_cmd(mq_rq->cmd_type))
				goto cmd_abort;
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warning("%s: retrying using single block read\n",
					   req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			ret = blk_end_request(req, -EIO,
						brq->data.blksz);
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_NOMEDIUM:
			goto cmd_abort;
		default:
			pr_err("%s: Unhandled return value (%d)",
					req->rq_disk->disk_name, status);
			goto cmd_abort;
		}

		if (ret) {
			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				if (!mq_rq->packed->retries)
					goto cmd_abort;
				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
				mmc_start_req(card->host,
					      &mq_rq->mmc_active, NULL);
			} else {
				/*
				 * In case of an incomplete request
				 * prepare it again and resend.
				 */
				mmc_blk_rw_rq_prep(mq_rq, card,
						disable_multi, mq);
				mmc_start_req(card->host,
						&mq_rq->mmc_active, NULL);
			}
		}
	} while (ret);

	return 1;

 cmd_abort:
	if (mmc_packed_cmd(mq_rq->cmd_type)) {
		mmc_blk_abort_packed_req(mq_rq);
	} else {
		if (mmc_card_removed(card))
			req->cmd_flags |= REQ_QUIET;
		while (ret)
			ret = blk_end_request(req, -EIO,
					blk_rq_cur_bytes(req));
	}

 start_new_req:
	if (rqc) {
		if (mmc_card_removed(card)) {
			rqc->cmd_flags |= REQ_QUIET;
			blk_end_request_all(rqc, -EIO);
		} else {
			/*
			 * If the current request is packed, it needs to be
			 * put back on the queue.
			 */
			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
				mmc_blk_revert_packed_req(mq, mq->mqrq_cur);

			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			mmc_start_req(card->host,
				      &mq->mqrq_cur->mmc_active, NULL);
		}
	}

	return 0;
}
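
/*
 * Queue issue function: dispatch discard, secure discard, flush and
 * ordinary read/write requests, claiming the host for the first
 * request of a burst and releasing it when the queue drains.
 */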
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	unsigned int cmd_flags = req ? req->cmd_flags : 0;

	if (req && !mq->mqrq_prev->req)
		/* claim host only for the first request */
		mmc_claim_host(card->host);

	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		if (req) {
			blk_end_request_all(req, -EIO);
		}
		ret = 0;
		goto out;
	}

	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
	if (cmd_flags & REQ_DISCARD) {
		/* complete ongoing async transfer before issuing discard */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		if (req->cmd_flags & REQ_SECURE &&
			!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (cmd_flags & REQ_FLUSH) {
		/* complete ongoing async transfer before issuing flush */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		if (!req && host->areq) {
			spin_lock_irqsave(&host->context_info.lock, flags);
			host->context_info.is_waiting_last_req = true;
			spin_unlock_irqrestore(&host->context_info.lock, flags);
		}
		ret = mmc_blk_issue_rw_rq(mq, req);
	}

out:
	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
	     (cmd_flags & MMC_REQ_SPECIAL_MASK))
		/*
		 * Release host when there are no more requests
		 * and after a special request (discard, flush) is done.
		 * In case of a special request, there is no reentry to
		 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
		 */
		mmc_release_host(card->host);
	return ret;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
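
/*
 * Allocate one mmc_blk_data together with its gendisk and request
 * queue; used both for the main data area and for each EXT_CSD
 * hardware partition (boot, general purpose, RPMB).
 */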
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, max_devices);
	if (devidx >= max_devices)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * !subname implies we are creating main mmc_blk_data that will be
	 * associated with mmc_card with mmc_set_drvdata. Due to device
	 * partitions, devidx will not coincide with a per-physical card
	 * index anymore so we keep track of a name index.
	 */
	if (!subname) {
		md->name_idx = find_first_zero_bit(name_use, max_devices);
		__set_bit(md->name_idx, name_use);
	} else
		md->name_idx = ((struct mmc_blk_data *)
				dev_to_disk(parent)->private_data)->name_idx;

	md->area_type = area_type;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	if (area_type & MMC_BLK_DATA_AREA_RPMB)
		md->disk->flags |= GENHD_FL_NO_PART_SCAN;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%d%s", md->name_idx, subname ? subname : "");

	if (mmc_card_mmc(card))
		blk_queue_logical_block_size(md->queue.queue,
					     card->ext_csd.data_sector_size);
	else
		blk_queue_logical_block_size(md->queue.queue, 512);

	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if (mmc_card_mmc(card) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
	}

	if (mmc_card_mmc(card) &&
	    (area_type == MMC_BLK_DATA_AREA_MAIN) &&
	    (md->flags & MMC_BLK_CMD23) &&
	    card->ext_csd.packed_event_en) {
		if (!mmc_packed_init(&md->queue, card))
			md->flags |= MMC_BLK_PACKED_CMD;
	}

	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	__clear_bit(devidx, dev_use);
	return ERR_PTR(ret);
}
*mmc_blk_alloc(struct mmc_card
*card
)
2120 struct mmc_blk_data
*md
;
2122 if (!mmc_card_sd(card
) && mmc_card_blockaddr(card
)) {
2124 * The EXT_CSD sector count is in number or 512 byte
2127 size
= card
->ext_csd
.sectors
;
2130 * The CSD capacity field is in units of read_blkbits.
2131 * set_capacity takes units of 512 bytes.
2133 size
= card
->csd
.capacity
<< (card
->csd
.read_blkbits
- 9);
2136 md
= mmc_blk_alloc_req(card
, &card
->dev
, size
, false, NULL
,
2137 MMC_BLK_DATA_AREA_MAIN
);
2141 static int mmc_blk_alloc_part(struct mmc_card
*card
,
2142 struct mmc_blk_data
*md
,
2143 unsigned int part_type
,
2146 const char *subname
,
2150 struct mmc_blk_data
*part_md
;
2152 part_md
= mmc_blk_alloc_req(card
, disk_to_dev(md
->disk
), size
, default_ro
,
2153 subname
, area_type
);
2154 if (IS_ERR(part_md
))
2155 return PTR_ERR(part_md
);
2156 part_md
->part_type
= part_type
;
2157 list_add(&part_md
->part
, &md
->part
);
2159 string_get_size((u64
)get_capacity(part_md
->disk
) << 9, STRING_UNITS_2
,
2160 cap_str
, sizeof(cap_str
));
2161 pr_info("%s: %s %s partition %u %s\n",
2162 part_md
->disk
->disk_name
, mmc_card_id(card
),
2163 mmc_card_name(card
), part_md
->part_type
, cap_str
);
/* MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
			if (ret)
				return ret;
		}
	}

	return ret;
}
*md
)
2198 struct mmc_card
*card
;
2201 card
= md
->queue
.card
;
2202 if (md
->disk
->flags
& GENHD_FL_UP
) {
2203 device_remove_file(disk_to_dev(md
->disk
), &md
->force_ro
);
2204 if ((md
->area_type
& MMC_BLK_DATA_AREA_BOOT
) &&
2205 card
->ext_csd
.boot_ro_lockable
)
2206 device_remove_file(disk_to_dev(md
->disk
),
2207 &md
->power_ro_lock
);
2209 /* Stop new requests from getting into the queue */
2210 del_gendisk(md
->disk
);
2213 /* Then flush out any already in there */
2214 mmc_cleanup_queue(&md
->queue
);
2215 if (md
->flags
& MMC_BLK_PACKED_CMD
)
2216 mmc_packed_clean(&md
->queue
);
static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;

	__clear_bit(md->name_idx, name_use);
	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}
*md
)
2238 struct mmc_card
*card
= md
->queue
.card
;
2241 md
->force_ro
.show
= force_ro_show
;
2242 md
->force_ro
.store
= force_ro_store
;
2243 sysfs_attr_init(&md
->force_ro
.attr
);
2244 md
->force_ro
.attr
.name
= "force_ro";
2245 md
->force_ro
.attr
.mode
= S_IRUGO
| S_IWUSR
;
2246 ret
= device_create_file(disk_to_dev(md
->disk
), &md
->force_ro
);
2250 if ((md
->area_type
& MMC_BLK_DATA_AREA_BOOT
) &&
2251 card
->ext_csd
.boot_ro_lockable
) {
2254 if (card
->ext_csd
.boot_ro_lock
& EXT_CSD_BOOT_WP_B_PWR_WP_DIS
)
2257 mode
= S_IRUGO
| S_IWUSR
;
2259 md
->power_ro_lock
.show
= power_ro_lock_show
;
2260 md
->power_ro_lock
.store
= power_ro_lock_store
;
2261 sysfs_attr_init(&md
->power_ro_lock
.attr
);
2262 md
->power_ro_lock
.attr
.mode
= mode
;
2263 md
->power_ro_lock
.attr
.name
=
2264 "ro_lock_until_next_power_on";
2265 ret
= device_create_file(disk_to_dev(md
->disk
),
2266 &md
->power_ro_lock
);
2268 goto power_ro_lock_fail
;
2273 device_remove_file(disk_to_dev(md
->disk
), &md
->force_ro
);
2275 del_gendisk(md
->disk
);
#define CID_MANFID_SANDISK	0x2
#define CID_MANFID_TOSHIBA	0x11
#define CID_MANFID_MICRON	0x13
#define CID_MANFID_SAMSUNG	0x15
static const struct mmc_fixup blk_fixups[] =
{
	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),

	/*
	 * Some MMC cards experience performance degradation with CMD23
	 * instead of CMD12-bounded multiblock transfers. For now we'll
	 * black list what's bad...
	 * - Certain Toshiba cards.
	 *
	 * N.B. This doesn't affect SD cards.
	 */
	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),

	/*
	 * Some Micron MMC cards need a longer data read timeout than
	 * indicated in CSD.
	 */
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
		  MMC_QUIRK_LONG_READ_TIME),

	/*
	 * On these Samsung MoviNAND parts, performing secure erase or
	 * secure trim can result in unrecoverable corruption due to a
	 * firmware bug.
	 */
	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),

	END_FIXUP
};
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	mmc_set_drvdata(card, md);
	mmc_fixup_device(card, blk_fixups);

	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}
	return 0;

 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return 0;
}
static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	mmc_blk_remove_parts(card, md);
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_release_host(card->host);
	mmc_blk_remove_req(md);
	mmc_set_drvdata(card, NULL);
}
#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}
static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#else
#define mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};
static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = 256 / perdev_minors;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");