/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>

#include "queue.h"
#include <mach/mtk_meminfo.h>
//add vmstat info with block tag log
#include <linux/vmstat.h>
#define FEATURE_STORAGE_VMSTAT_LOGGER

#include <linux/xlog.h>
#include <asm/div64.h>
#include <linux/vmalloc.h>

#include <linux/mmc/sd_misc.h>

#define MET_USER_EVENT_SUPPORT
#include <linux/met_drv.h>

#define FEATURE_STORAGE_PERF_INDEX
//storage perf log is disabled in user build kernels
#ifdef USER_BUILD_KERNEL
#undef FEATURE_STORAGE_PERF_INDEX
#endif
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */

#define mmc_req_rel_wr(req)	(((req)->cmd_flags & REQ_FUA) && \
				 (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	name_idx;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with mmc_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};
static DEFINE_MUTEX(open_lock);

enum {
	MMC_PACKED_NR_IDX = -1,
	MMC_PACKED_NR_ZERO,
	MMC_PACKED_NR_SINGLE,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
#ifndef CONFIG_MTK_FPGA
#include <linux/met_ftrace_bio.h>
#endif

char mmc_get_rw_type(u32 opcode)
{
	switch (opcode) {
	case MMC_READ_SINGLE_BLOCK:
	case MMC_READ_MULTIPLE_BLOCK:
		return 'R';
	case MMC_WRITE_BLOCK:
	case MMC_WRITE_MULTIPLE_BLOCK:
		return 'W';
	default:
		/* Neither a simple read nor a simple write */
		return 'N';
	}
}
inline int check_met_mmc_async_req_legal(struct mmc_host *host,
					 struct mmc_async_req *areq)
{
	int is_legal = 0;

	if (!((host == NULL) || (areq == NULL) || (areq->mrq == NULL)
	      || (areq->mrq->cmd == NULL) || (areq->mrq->data == NULL)
	      || (host->card == NULL))) {
		is_legal = 1;
	}

	return is_legal;
}

inline int check_met_mmc_blk_data_legal(struct mmc_blk_data *md)
{
	int is_legal = 0;

	if (!((md == NULL) || (md->disk == NULL))) {
		is_legal = 1;
	}

	return is_legal;
}

inline int check_met_mmc_req_legal(struct mmc_host *host,
				   struct mmc_request *req)
{
	int is_legal = 0;

	if (!((host == NULL) || (req == NULL) || (req->cmd == NULL)
	      || (req->data == NULL) || (host->card == NULL))) {
		is_legal = 1;
	}

	return is_legal;
}
void met_mmc_insert(struct mmc_host *host, struct mmc_async_req *areq)
{
	struct mmc_blk_data *md;
	char type;

	if (!check_met_mmc_async_req_legal(host, areq))
		return;

	md = mmc_get_drvdata(host->card);
	if (!check_met_mmc_blk_data_legal(md))
		return;

	type = mmc_get_rw_type(areq->mrq->cmd->opcode);
	if (type == 'N')
		return;

#ifndef CONFIG_MTK_FPGA
	MET_FTRACE_PRINTK(met_mmc_insert, md, areq, type);
#endif
}
void met_mmc_dma_map(struct mmc_host *host, struct mmc_async_req *areq)
{
	struct mmc_blk_data *md;
	char type;

	if (!check_met_mmc_async_req_legal(host, areq))
		return;

	md = mmc_get_drvdata(host->card);
	if (!check_met_mmc_blk_data_legal(md))
		return;

	type = mmc_get_rw_type(areq->mrq->cmd->opcode);
	if (type == 'N')
		return;

#ifndef CONFIG_MTK_FPGA
	MET_FTRACE_PRINTK(met_mmc_dma_map, md, areq, type);
#endif
}
//void met_mmc_issue(struct mmc_host *host, struct mmc_async_req *areq)
//{
//	struct mmc_blk_data *md;
//	char type;
//
//	if (!check_met_mmc_async_req_legal(host, areq))
//		return;
//
//	md = mmc_get_drvdata(host->card);
//
//	type = mmc_get_rw_type(areq->mrq->cmd->opcode);
//
//	MET_FTRACE_PRINTK(met_mmc_issue, md, areq, type);
//}
void met_mmc_issue(struct mmc_host *host, struct mmc_request *req)
{
	struct mmc_blk_data *md;
	char type;

	if (!check_met_mmc_req_legal(host, req))
		return;

	md = mmc_get_drvdata(host->card);
	if (!check_met_mmc_blk_data_legal(md))
		return;

	type = mmc_get_rw_type(req->cmd->opcode);
	if (type == 'N')
		return;

#ifndef CONFIG_MTK_FPGA
	MET_FTRACE_PRINTK(met_mmc_issue, md, req, type);
#endif
}
void met_mmc_send_cmd(struct mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_blk_data *md = mmc_get_drvdata(host->card);
	char type;

	type = mmc_get_rw_type(cmd->opcode);
	if (type == 'N')
		return;

	trace_printk("%d,%d %c %d + %d [%s]\n",
		     md->disk->major, md->disk->first_minor, type,
		     cmd->arg, cmd->data->blocks,
		     current->comm);
}
void met_mmc_xfr_done(struct mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_blk_data *md = mmc_get_drvdata(host->card);
	char type;

	type = mmc_get_rw_type(cmd->opcode);
	if (type == 'N')
		return;

	trace_printk("%d,%d %c %d + %d [%s]\n",
		     md->disk->major, md->disk->first_minor, type,
		     cmd->arg, cmd->data->blocks,
		     current->comm);
}
void met_mmc_wait_xfr(struct mmc_host *host, struct mmc_async_req *areq)
{
	struct mmc_blk_data *md = mmc_get_drvdata(host->card);
	char type;

	type = mmc_get_rw_type(areq->mrq->cmd->opcode);
	if (type == 'N')
		return;

	trace_printk("%d,%d %c %d + %d [%s]\n",
		     md->disk->major, md->disk->first_minor, type,
		     areq->mrq->cmd->arg, areq->mrq->data->blocks,
		     current->comm);
}
void met_mmc_tuning_start(struct mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_blk_data *md = mmc_get_drvdata(host->card);
	char type;

	type = mmc_get_rw_type(cmd->opcode);
	if (type == 'N')
		return;

	trace_printk("%d,%d %c %d + %d [%s]\n",
		     md->disk->major, md->disk->first_minor, type,
		     cmd->arg, cmd->data->blocks,
		     current->comm);
}
void met_mmc_tuning_end(struct mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_blk_data *md = mmc_get_drvdata(host->card);
	char type;

	type = mmc_get_rw_type(cmd->opcode);
	if (type == 'N')
		return;

	trace_printk("%d,%d %c %d + %d [%s]\n",
		     md->disk->major, md->disk->first_minor, type,
		     cmd->arg, cmd->data->blocks,
		     current->comm);
}
void met_mmc_complete(struct mmc_host *host, struct mmc_async_req *areq)
{
	struct mmc_blk_data *md;
	char type;

	if (!check_met_mmc_async_req_legal(host, areq))
		return;

	md = mmc_get_drvdata(host->card);
	if (!check_met_mmc_blk_data_legal(md))
		return;

	type = mmc_get_rw_type(areq->mrq->cmd->opcode);
	if (type == 'N')
		return;

#ifndef CONFIG_MTK_FPGA
	MET_FTRACE_PRINTK(met_mmc_complete, md, areq, type);
#endif
}
void met_mmc_dma_unmap_start(struct mmc_host *host, struct mmc_async_req *areq)
{
	struct mmc_blk_data *md;
	char type;

	if (!check_met_mmc_async_req_legal(host, areq))
		return;

	md = mmc_get_drvdata(host->card);
	if (!check_met_mmc_blk_data_legal(md))
		return;

	type = mmc_get_rw_type(areq->mrq->cmd->opcode);
	if (type == 'N')
		return;

#ifndef CONFIG_MTK_FPGA
	MET_FTRACE_PRINTK(met_mmc_dma_unmap_start, md, areq, type);
#endif
}
void met_mmc_dma_unmap_stop(struct mmc_host *host, struct mmc_async_req *areq)
{
	struct mmc_blk_data *md;
	char type;

	if (!check_met_mmc_async_req_legal(host, areq))
		return;

	md = mmc_get_drvdata(host->card);
	if (!check_met_mmc_blk_data_legal(md))
		return;

	type = mmc_get_rw_type(areq->mrq->cmd->opcode);
	if (type == 'N')
		return;

#ifndef CONFIG_MTK_FPGA
	MET_FTRACE_PRINTK(met_mmc_dma_unmap_stop, md, areq, type);
#endif
}
void met_mmc_continue_req_end(struct mmc_host *host, struct mmc_async_req *areq)
{
	struct mmc_blk_data *md;
	char type;

	if (!check_met_mmc_async_req_legal(host, areq))
		return;

	md = mmc_get_drvdata(host->card);
	if (!check_met_mmc_blk_data_legal(md))
		return;

	type = mmc_get_rw_type(areq->mrq->cmd->opcode);
	if (type == 'N')
		return;

#ifndef CONFIG_MTK_FPGA
	MET_FTRACE_PRINTK(met_mmc_continue_req_end, md, areq, type);
#endif
}
void met_mmc_dma_stop(struct mmc_host *host, struct mmc_async_req *areq,
		      unsigned int bd_num)
{
	struct mmc_blk_data *md;
	char type;

	if (!check_met_mmc_async_req_legal(host, areq))
		return;

	md = mmc_get_drvdata(host->card);
	if (!check_met_mmc_blk_data_legal(md))
		return;

	type = mmc_get_rw_type(areq->mrq->cmd->opcode);
	if (type == 'N')
		return;

#ifndef CONFIG_MTK_FPGA
	MET_FTRACE_PRINTK(met_mmc_dma_stop, md, areq, type, bd_num);
#endif
}
//void met_mmc_end(struct mmc_host *host, struct mmc_async_req *areq)
//{
//	struct mmc_blk_data *md;
//	char type;
//
//	if (areq && areq->mrq && host && host->card) {
//		type = mmc_get_rw_type(areq->mrq->cmd->opcode);
//
//		md = mmc_get_drvdata(host->card);
//
//		if (areq && areq->mrq)
//			trace_printk("%d,%d %c %d + %d [%s]\n",
//				     md->disk->major, md->disk->first_minor, type,
//				     areq->mrq->cmd->arg, areq->mrq->data->blocks,
//				     current->comm);
//	}
//}
static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
	struct mmc_packed *packed = mqrq->packed;

	BUG_ON(!packed);

	mqrq->cmd_type = MMC_PACKED_NONE;
	packed->nr_entries = MMC_PACKED_NR_ZERO;
	packed->idx_failure = MMC_PACKED_NR_IDX;
	packed->retries = 0;
	packed->blocks = 0;
}
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}
static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}
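
/*
 * Worked example (not in the original source, numbers assumed): with the
 * default CONFIG_MMC_BLOCK_MINORS of 8, the second card's disk gets
 * first_minor 8, so the integer division above recovers device index
 * 8 / 8 = 1. A minimal sketch of the same mapping:
 */
#if 0
static int example_devidx(int first_minor, int minors_per_dev)
{
	/* mirrors mmc_get_devidx(): integer division recovers the slot */
	return first_minor / minors_per_dev;	/* 8 / 8 == 1 */
}
#endif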
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}
static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	return ret;
}
static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;

	mmc_claim_host(card->host);

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
			 card->ext_csd.boot_ro_lock |
			 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
			 card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

	mmc_release_host(card->host);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}
static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes)
		return idata;

	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}
static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = get_card_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
	struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;
	int is_rpmb = false;
	u32 status = 0;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	mmc_claim_host(card->host);

	err = mmc_blk_part_switch(card, md);
	if (err)
		goto cmd_rel_host;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			goto cmd_rel_host;
	}

	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
			idata->ic.write_flag & (1 << 31));
		if (err)
			goto cmd_rel_host;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
						__func__, cmd.error);
		err = cmd.error;
		goto cmd_rel_host;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
						__func__, data.error);
		err = data.error;
		goto cmd_rel_host;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
		err = -EFAULT;
		goto cmd_rel_host;
	}

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
				 idata->buf, idata->buf_bytes)) {
			err = -EFAULT;
			goto cmd_rel_host;
		}
	}

	if (is_rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
					"%s: Card Status=0x%08X, error %d\n",
					__func__, status, err);
	}

cmd_rel_host:
	mmc_release_host(card->host);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return err;
}
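
/*
 * Illustrative userspace sketch (not part of this driver, kept out of the
 * build): issuing a raw CMD13 (SEND_STATUS) through the MMC_IOC_CMD path
 * handled above. The device node path and the RCA value are assumptions
 * for the example; the EX_* response-flag encodings mirror the kernel's
 * MMC_RSP_* bit layout as this author understands it.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

#define EX_MMC_SEND_STATUS	13		/* CMD13 */
#define EX_MMC_RSP_PRESENT	(1 << 0)
#define EX_MMC_RSP_CRC		(1 << 2)
#define EX_MMC_RSP_OPCODE	(1 << 4)
#define EX_MMC_RSP_R1		(EX_MMC_RSP_PRESENT | EX_MMC_RSP_CRC | EX_MMC_RSP_OPCODE)

int example_send_status(void)
{
	struct mmc_ioc_cmd ic = {0};
	/* must be the whole device, not a partition (see CAP_SYS_RAWIO check) */
	int fd = open("/dev/mmcblk0", O_RDWR);

	if (fd < 0)
		return -1;
	ic.opcode = EX_MMC_SEND_STATUS;
	ic.arg = 1 << 16;		/* hypothetical RCA of 1 */
	ic.flags = EX_MMC_RSP_R1;
	if (ioctl(fd, MMC_IOC_CMD, &ic) == 0)
		printf("card status 0x%08x\n", ic.response[0]);
	close(fd);
	return 0;
}
#endif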
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	int ret = -EINVAL;
	if (cmd == MMC_IOC_CMD)
		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
	return ret;
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif
static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = mmc_get_drvdata(card);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret)
			return ret;

		card->ext_csd.part_config = part_config;
	}

	main_md->part_curr = md->part_type;
	return 0;
}
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}

u32 __mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	return mmc_sd_num_wr_blocks(card);
}
EXPORT_SYMBOL(__mmc_sd_num_wr_blocks);
static int send_stop(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}
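
/*
 * A minimal sketch (assumption: the standard R1 status layout from the
 * eMMC/SD specs, matching the R1_* macros already used in this file) of
 * how the 32-bit word returned by get_card_status() breaks down.
 */
#if 0
static void example_decode_r1(u32 status)
{
	/* bits 12:9 carry the current state; R1_STATE_PRG means programming */
	unsigned int state = R1_CURRENT_STATE(status);

	if (state == R1_STATE_PRG)
		pr_info("card still busy programming\n");
	if (status & R1_READY_FOR_DATA)		/* bit 8 */
		pr_info("card ready for data\n");
}
#endif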
#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0
static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid) {
			pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
			return ERR_RETRY;
		}
		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
			pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/* Otherwise abort the command */
		pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}
/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;

	/* Flag General errors */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
		if ((status & R1_ERROR) ||
		    (brq->stop.resp[0] & R1_ERROR)) {
			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0], status);
			*gen_err = 1;
		}

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card, &stop_status);
		if (err) {
			get_card_status(card, &status, 0);
			if ((R1_CURRENT_STATE(status) == R1_STATE_TRAN) ||
			    (R1_CURRENT_STATE(status) == R1_STATE_PRG)) {
				err = 0;
				stop_status = 0;
				pr_err("b card status %d\n", status);
			} else
				pr_err("g card status %d\n", status);
		}
		if (err)
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);

		/*
		 * If the stop cmd also timed out, the card is probably
		 * not present, so abort.  Other errors are bad news too.
		 */
		if (err)
			return ERR_ABORT;
		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
			if (stop_status & R1_ERROR) {
				pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
				       req->rq_disk->disk_name, __func__,
				       stop_status);
				*gen_err = 1;
			}
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	brq->stop.resp[0] = stop_status;
	brq->stop.error = 0;

	return ERR_CONTINUE;
}
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}
static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}
int mmc_access_rpmb(struct mmc_queue *mq)
{
	struct mmc_blk_data *md = mq->data;
	/*
	 * If this is a RPMB partition access, return true
	 */
	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
		return true;

	return false;
}
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg, trim_arg, erase_arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	/* The sanitize operation is supported at v4.5 only */
	if (mmc_can_sanitize(card)) {
		erase_arg = MMC_ERASE_ARG;
		trim_arg = MMC_TRIM_ARG;
	} else {
		erase_arg = MMC_SECURE_ERASE_ARG;
		trim_arg = MMC_SECURE_TRIM1_ARG;
	}

	if (mmc_erase_group_aligned(card, from, nr))
		arg = erase_arg;
	else if (mmc_can_trim(card))
		arg = trim_arg;
	else {
		err = -EINVAL;
		goto out;
	}
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err)
		goto out;

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err)
			goto out;
	}

	if (mmc_can_sanitize(card)) {
		trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_SANITIZE_START, 1, 0);
		trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	blk_end_request_all(req, ret);

	return ret ? 0 : 1;
}
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}
#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */
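
/*
 * A minimal sketch of how the mask above screens an R1 response word;
 * the sample value is hypothetical.
 */
#if 0
static void example_screen_resp(void)
{
	u32 resp = R1_WP_VIOLATION | R1_ERROR;	/* hypothetical response */

	if (resp & CMD_ERRORS)
		pr_err("response carries a command execution error\n");
}
#endif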
static int mmc_blk_err_check(struct mmc_card *card,
			     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int ecc_err = 0, gen_err = 0;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, which we have to wait to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		u32 status;
		unsigned long timeout;

		/* Check stop command response */
		if (brq->stop.resp[0] & R1_ERROR) {
			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0]);
			gen_err = 1;
		}

		timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
		do {
			int err = get_card_status(card, &status, 5);
			if (err) {
				pr_err("%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
				return MMC_BLK_CMD_ERR;
			}

			if (status & R1_ERROR) {
				pr_err("%s: %s: general error sending status command, card status %#x\n",
				       req->rq_disk->disk_name, __func__,
				       status);
				gen_err = 1;
			}

			/* Timeout if the device never becomes ready for data
			 * and never leaves the program state.
			 */
			if (time_after(jiffies, timeout)) {
				pr_err("%s: Card stuck in programming state!"\
					" %s %s\n", mmc_hostname(card->host),
					req->rq_disk->disk_name, __func__);

				return MMC_BLK_CMD_ERR;
			}
			/*
			 * Some cards mishandle the status bits,
			 * so make sure to check both the busy
			 * indication and the card state.
			 */
		} while (!(status & R1_READY_FOR_DATA) ||
			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
	}

	/* if general error occurs, retry the write operation. */
	if (gen_err) {
		pr_warn("%s: retrying write for general error\n",
			req->rq_disk->disk_name);
		return MMC_BLK_RETRY;
	}

	if (brq->data.error) {
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
			return MMC_BLK_PARTIAL;
		else
			return MMC_BLK_SUCCESS;
	}

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}
static int mmc_blk_packed_err_check(struct mmc_card *card,
				    struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
						   mmc_active);
	struct request *req = mq_rq->req;
	struct mmc_packed *packed = mq_rq->packed;
	int err, check, status;
	u8 *ext_csd;

	BUG_ON(!packed);

	packed->retries--;
	check = mmc_blk_err_check(card, areq);
	err = get_card_status(card, &status, 0);
	if (err) {
		pr_err("%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
		return MMC_BLK_ABORT;
	}

	if (status & R1_EXCEPTION_EVENT) {
		ext_csd = kzalloc(512, GFP_KERNEL);
		if (!ext_csd) {
			pr_err("%s: unable to allocate buffer for ext_csd\n",
			       req->rq_disk->disk_name);
			return -ENOMEM;
		}

		err = mmc_send_ext_csd(card, ext_csd);
		if (err) {
			pr_err("%s: error %d sending ext_csd\n",
			       req->rq_disk->disk_name, err);
			check = MMC_BLK_ABORT;
			goto free;
		}

		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
		     EXT_CSD_PACKED_FAILURE) &&
		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
		     EXT_CSD_PACKED_GENERIC_ERROR)) {
			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
			    EXT_CSD_PACKED_INDEXED_ERROR) {
				packed->idx_failure =
				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
				check = MMC_BLK_PARTIAL;
			}
			pr_err("%s: packed cmd failed, nr %u, sectors %u, "
			       "failure index: %d\n",
			       req->rq_disk->disk_name, packed->nr_entries,
			       packed->blocks, packed->idx_failure);
		}
free:
		kfree(ext_csd);
	}

	return check;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->data;
	bool do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/* Some controllers can't do multiblock reads due to hw bugs */
		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
		    rq_data_dir(req) == READ)
			brq->data.blocks = 1;
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
#ifdef CONFIG_MTK_EMMC_CACHE
	/* for non-cacheable system data,
	 * the implementation of reliable write / force prg write,
	 * must be applied with multi write cmd
	 */
	if (mmc_card_mmc(card) && (card->ext_csd.cache_ctrl & 0x1)) {
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	}
#endif
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags |= MMC_DATA_READ;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags |= MMC_DATA_WRITE;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		(req->cmd_flags & REQ_META) &&
		(rq_data_dir(req) == WRITE) &&
		((brq->data.blocks * brq->data.blksz) >=
		 card->ext_csd.data_tag_unit_size);

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open-ended ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	if (brq->data.sg_len > 1024)
		pr_err("%s:%d sglen = %x\n", __func__, __LINE__, brq->data.sg_len);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
		pr_err("%s:%d sglen = %x\n", __func__, __LINE__, brq->data.sg_len);
	}

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}
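
/*
 * A minimal sketch of the CMD23 (SET_BLOCK_COUNT) argument assembled into
 * brq->sbc.arg above: bit 31 requests a reliable write, bit 29 tags the
 * data, and the low bits carry the block count. The sample values are
 * hypothetical.
 */
#if 0
static u32 example_cmd23_arg(void)
{
	u32 blocks = 64;			/* hypothetical transfer size */
	bool rel_wr = true, data_tag = false;

	return blocks |
	       (rel_wr ? (1 << 31) : 0) |	/* reliable write request */
	       (data_tag ? (1 << 29) : 0);	/* data tag request */
}
#endif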
static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
					  struct mmc_card *card)
{
	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
	unsigned int max_seg_sz = queue_max_segment_size(q);
	unsigned int len, nr_segs = 0;

	do {
		len = min(hdr_sz, max_seg_sz);
		hdr_sz -= len;
		nr_segs++;
	} while (hdr_sz);

	return nr_segs;
}
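
/*
 * Worked example for the helper above, with assumed numbers: a 4KB packed
 * header on a queue whose max segment size is 2048 bytes needs
 * ceil(4096 / 2048) = 2 segments; the loop computes exactly that.
 */
#if 0
static void example_hdr_segs(void)
{
	unsigned int hdr_sz = 4096, max_seg_sz = 2048, len, nr_segs = 0;

	do {
		len = min(hdr_sz, max_seg_sz);
		hdr_sz -= len;
		nr_segs++;
	} while (hdr_sz);	/* nr_segs == 2 */
}
#endif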
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;
	struct request *cur = req, *next = NULL;
	struct mmc_blk_data *md = mq->data;
	struct mmc_queue_req *mqrq = mq->mqrq_cur;
	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
	unsigned int req_sectors = 0, phys_segments = 0;
	unsigned int max_blk_count, max_phys_segs;
	bool put_back = true;
	u8 max_packed_rw = 0;
	u8 reqs = 0;

	if (!(md->flags & MMC_BLK_PACKED_CMD))
		goto no_packed;

	if ((rq_data_dir(cur) == WRITE) &&
	    mmc_host_packed_wr(card->host))
		max_packed_rw = card->ext_csd.max_packed_writes;

	if (max_packed_rw == 0)
		goto no_packed;

	if (mmc_req_rel_wr(cur) &&
	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
		goto no_packed;

	if (mmc_large_sector(card) &&
	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
		goto no_packed;

	mmc_blk_clear_packed(mqrq);

	max_blk_count = min(card->host->max_blk_count,
			    card->host->max_req_size >> 9);
	if (unlikely(max_blk_count > 0xffff))
		max_blk_count = 0xffff;

	max_phys_segs = queue_max_segments(q);
	req_sectors += blk_rq_sectors(cur);
	phys_segments += cur->nr_phys_segments;

	if (rq_data_dir(cur) == WRITE) {
		req_sectors += mmc_large_sector(card) ? 8 : 1;
		phys_segments += mmc_calc_packed_hdr_segs(q, card);
	}

	do {
		if (reqs >= max_packed_rw - 1) {
			put_back = false;
			break;
		}

		spin_lock_irq(q->queue_lock);
		next = blk_fetch_request(q);
		spin_unlock_irq(q->queue_lock);
		if (!next) {
			put_back = false;
			break;
		}

		if (mmc_large_sector(card) &&
		    !IS_ALIGNED(blk_rq_sectors(next), 8))
			break;

		if (next->cmd_flags & REQ_DISCARD ||
		    next->cmd_flags & REQ_FLUSH)
			break;

		if (rq_data_dir(cur) != rq_data_dir(next))
			break;

		if (mmc_req_rel_wr(next) &&
		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
			break;

		req_sectors += blk_rq_sectors(next);
		if (req_sectors > max_blk_count)
			break;

		phys_segments += next->nr_phys_segments;
		if (phys_segments > max_phys_segs)
			break;

		list_add_tail(&next->queuelist, &mqrq->packed->list);
		cur = next;
		reqs++;
	} while (1);

	if (put_back) {
		spin_lock_irq(q->queue_lock);
		blk_requeue_request(q, next);
		spin_unlock_irq(q->queue_lock);
	}

	if (reqs > 0) {
		list_add(&req->queuelist, &mqrq->packed->list);
		mqrq->packed->nr_entries = ++reqs;
		mqrq->packed->retries = reqs;
		return reqs;
	}

no_packed:
	mqrq->cmd_type = MMC_PACKED_NONE;
	return 0;
}
static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
					struct mmc_card *card,
					struct mmc_queue *mq)
{
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct request *prq;
	struct mmc_blk_data *md = mq->data;
	struct mmc_packed *packed = mqrq->packed;
	bool do_rel_wr, do_data_tag;
	u32 *packed_cmd_hdr;
	u8 hdr_blocks;
	u8 i = 1;

	BUG_ON(!packed);

	mqrq->cmd_type = MMC_PACKED_WRITE;
	packed->blocks = 0;
	packed->idx_failure = MMC_PACKED_NR_IDX;

	packed_cmd_hdr = packed->cmd_hdr;
	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
	packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
		(PACKED_CMD_WR << 8) | PACKED_CMD_VER);
	hdr_blocks = mmc_large_sector(card) ? 8 : 1;

	/*
	 * Argument for each entry of packed group
	 */
	list_for_each_entry(prq, &packed->list, queuelist) {
		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
			(prq->cmd_flags & REQ_META) &&
			(rq_data_dir(prq) == WRITE) &&
			((brq->data.blocks * brq->data.blksz) >=
			 card->ext_csd.data_tag_unit_size);
		/* Argument of CMD23 */
		packed_cmd_hdr[(i * 2)] = cpu_to_le32(
			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
			blk_rq_sectors(prq));
		/* Argument of CMD18 or CMD25 */
		packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
			mmc_card_blockaddr(card) ?
			blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
		packed->blocks += blk_rq_sectors(prq);
		i++;
	}

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;
	brq->mrq.sbc = &brq->sbc;
	brq->mrq.stop = &brq->stop;

	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	brq->data.blksz = 512;
	brq->data.blocks = packed->blocks + hdr_blocks;
	brq->data.flags |= MMC_DATA_WRITE;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
	pr_err("%s: sglen = %d\n", __func__, brq->data.sg_len);

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;

	mmc_queue_bounce_pre(mqrq);
}
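
/*
 * A minimal sketch of the packed write header words built above, for an
 * assumed group of two requests: word 0 holds the entry count, command
 * direction (write) and version; each entry then contributes a
 * CMD23-argument word and a CMD25-address word. Sector counts and
 * addresses here are hypothetical.
 */
#if 0
static void example_packed_hdr(u32 *hdr)
{
	hdr[0] = cpu_to_le32((2 << 16) |		/* nr_entries */
			     (PACKED_CMD_WR << 8) |	/* write group */
			     PACKED_CMD_VER);
	hdr[1] = cpu_to_le32(8);	/* entry 0: CMD23 arg, 8 sectors */
	hdr[2] = cpu_to_le32(0x1000);	/* entry 0: CMD25 start address */
	hdr[3] = cpu_to_le32(16);	/* entry 1: CMD23 arg, 16 sectors */
	hdr[4] = cpu_to_le32(0x2000);	/* entry 1: CMD25 start address */
}
#endif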
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			   struct mmc_blk_request *brq, struct request *req,
			   int ret)
{
	struct mmc_queue_req *mq_rq;
	mq_rq = container_of(brq, struct mmc_queue_req, brq);

	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			ret = blk_end_request(req, 0, blocks << 9);
		}
	} else {
		if (!mmc_packed_cmd(mq_rq->cmd_type))
			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
	}
	return ret;
}
static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;
	int idx = packed->idx_failure, i = 0;
	int ret = 0;

	BUG_ON(!packed);

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		if (idx == i) {
			/* retry from error index */
			packed->nr_entries -= idx;
			mq_rq->req = prq;
			ret = 1;

			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
				list_del_init(&prq->queuelist);
				mmc_blk_clear_packed(mq_rq);
			}
			return ret;
		}
		list_del_init(&prq->queuelist);
		blk_end_request(prq, 0, blk_rq_bytes(prq));
		i++;
	}

	mmc_blk_clear_packed(mq_rq);
	return ret;
}
static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;

	BUG_ON(!packed);

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		list_del_init(&prq->queuelist);
		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
	}

	mmc_blk_clear_packed(mq_rq);
}
static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
				      struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct request_queue *q = mq->queue;
	struct mmc_packed *packed = mq_rq->packed;

	BUG_ON(!packed);

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.prev);
		if (prq->queuelist.prev != &packed->list) {
			list_del_init(&prq->queuelist);
			spin_lock_irq(q->queue_lock);
			blk_requeue_request(mq->queue, prq);
			spin_unlock_irq(q->queue_lock);
		} else {
			list_del_init(&prq->queuelist);
		}
	}

	mmc_blk_clear_packed(mq_rq);
}
#if defined(FEATURE_STORAGE_PERF_INDEX)
#define PRT_TIME_PERIOD	500000000
#define UP_LIMITS_4BYTE	4294967295UL	//((4*1024*1024*1024)-1)

pid_t mmcqd[ID_CNT] = {0};
bool start_async_req[ID_CNT] = {0};
unsigned long long start_async_req_time[ID_CNT] = {0};
static unsigned long long mmcqd_tag_t1[ID_CNT] = {0}, mmccid_tag_t1 = 0;
unsigned long long mmcqd_t_usage_wr[ID_CNT] = {0}, mmcqd_t_usage_rd[ID_CNT] = {0};
unsigned int mmcqd_rq_size_wr[ID_CNT] = {0}, mmcqd_rq_size_rd[ID_CNT] = {0};
static unsigned int mmcqd_wr_offset_tag[ID_CNT] = {0}, mmcqd_rd_offset_tag[ID_CNT] = {0}, mmcqd_wr_offset[ID_CNT] = {0}, mmcqd_rd_offset[ID_CNT] = {0};
static unsigned int mmcqd_wr_bit[ID_CNT] = {0}, mmcqd_wr_tract[ID_CNT] = {0};
static unsigned int mmcqd_rd_bit[ID_CNT] = {0}, mmcqd_rd_tract[ID_CNT] = {0};
static unsigned int mmcqd_wr_break[ID_CNT] = {0}, mmcqd_rd_break[ID_CNT] = {0};
unsigned int mmcqd_rq_count[ID_CNT] = {0}, mmcqd_wr_rq_count[ID_CNT] = {0}, mmcqd_rd_rq_count[ID_CNT] = {0};
extern u32 g_u32_cid[4];
#ifdef FEATURE_STORAGE_META_LOG
int check_perdev_minors = CONFIG_MMC_BLOCK_MINORS;
struct metadata_rwlogger metadata_logger[10] = {{{0}}};
#endif

unsigned int mmcqd_work_percent[ID_CNT] = {0};
unsigned int mmcqd_w_throughput[ID_CNT] = {0};
unsigned int mmcqd_r_throughput[ID_CNT] = {0};
unsigned int mmcqd_read_clear[ID_CNT] = {0};

static void g_var_clear(unsigned int idx)
{
	mmcqd_t_usage_wr[idx] = 0;
	mmcqd_t_usage_rd[idx] = 0;
	mmcqd_rq_size_wr[idx] = 0;
	mmcqd_rq_size_rd[idx] = 0;
	mmcqd_rq_count[idx] = 0;
	mmcqd_wr_offset[idx] = 0;
	mmcqd_rd_offset[idx] = 0;
	mmcqd_wr_break[idx] = 0;
	mmcqd_rd_break[idx] = 0;
	mmcqd_wr_tract[idx] = 0;
	mmcqd_wr_bit[idx] = 0;
	mmcqd_rd_tract[idx] = 0;
	mmcqd_rd_bit[idx] = 0;
	mmcqd_wr_rq_count[idx] = 0;
	mmcqd_rd_rq_count[idx] = 0;
}
unsigned int find_mmcqd_index(void)
{
	pid_t mmcqd_pid = 0;
	unsigned int idx = 0;
	unsigned char i = 0;

	mmcqd_pid = task_pid_nr(current);

	if (mmcqd[0] == 0) {
		mmcqd[0] = mmcqd_pid;
		start_async_req[0] = 0;
	}

	for (i = 0; i < ID_CNT; i++) {
		if (mmcqd_pid == mmcqd[i]) {
			idx = i;
			break;
		}
		if ((mmcqd[i] == 0) || (i == ID_CNT - 1)) {
			mmcqd[i] = mmcqd_pid;
			start_async_req[i] = 0;
			idx = i;
			break;
		}
	}
	return idx;
}
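
/*
 * Worked example (assumed numbers) of the workload accounting done in the
 * issue path below: with a 500 ms window (PRT_TIME_PERIOD) and 100 ms of
 * accumulated busy time, do_div() first scales the period down by 100 so
 * that the remaining division fits in 32 bits:
 * 100000000 / 5000000 = 20 percent.
 */
#if 0
static unsigned int example_workload_percent(void)
{
	unsigned long long t_period = 500000000ULL;	/* ns, one window */
	unsigned long long t_usage = 100000000ULL;	/* ns of busy time */

	do_div(t_period, 100);	/* avoids a full 64-bit division on 32-bit ARM */
	return (unsigned int)t_usage / (unsigned int)t_period;	/* == 20 */
}
#endif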
//#undef FEATURE_STORAGE_PID_LOGGER
#if defined(FEATURE_STORAGE_PID_LOGGER)
struct struct_pid_logger g_pid_logger[PID_ID_CNT] = {{0, 0, {0}, {0}, {0}, {0}}};
#endif

unsigned char *page_logger = NULL;
spinlock_t g_locker;
2227 static int mmc_blk_issue_rw_rq(struct mmc_queue
*mq
, struct request
*rqc
)
2229 struct mmc_blk_data
*md
= mq
->data
;
2230 struct mmc_card
*card
= md
->queue
.card
;
2231 struct mmc_blk_request
*brq
= &mq
->mqrq_cur
->brq
;
2232 int ret
= 1, disable_multi
= 0, retry
= 0, type
;
2233 enum mmc_blk_status status
;
2234 struct mmc_queue_req
*mq_rq
;
2235 struct request
*req
= rqc
;
2236 struct mmc_async_req
*areq
;
2237 const u8 packed_nr
= 2;
2239 unsigned long long time1
= 0;
2240 #if defined(FEATURE_STORAGE_PERF_INDEX)
2242 unsigned long long t_period
=0, t_usage
=0;
2243 unsigned int t_percent
=0;
2244 unsigned int perf_meter
=0;
2245 unsigned int rq_byte
=0,rq_sector
=0,sect_offset
=0;
2246 unsigned int diversity
=0;
2248 #ifdef FEATURE_STORAGE_META_LOG
2249 unsigned int mmcmetaindex
=0;
2252 #if defined(FEATURE_STORAGE_PID_LOGGER)
2253 unsigned int index
=0;
2256 if (!rqc
&& !mq
->mqrq_prev
->req
)
2258 time1
= sched_clock();
2261 reqs
= mmc_blk_prep_packed_list(mq
, rqc
);
2262 #if defined(FEATURE_STORAGE_PERF_INDEX)
2263 mmcqd_pid
= task_pid_nr(current
);
2265 idx
= find_mmcqd_index();
2267 mmcqd_read_clear
[idx
] = 1;
2268 if(mmccid_tag_t1
==0)
2269 mmccid_tag_t1
= time1
;
2270 t_period
= time1
- mmccid_tag_t1
;
2271 if(t_period
>= (unsigned long long )((PRT_TIME_PERIOD
)*(unsigned long long )10))
2273 xlog_printk(ANDROID_LOG_DEBUG
, "BLOCK_TAG", "MMC Queue Thread:%d, %d, %d, %d, %d \n", mmcqd
[0], mmcqd
[1], mmcqd
[2], mmcqd
[3], mmcqd
[4]);
2274 xlog_printk(ANDROID_LOG_DEBUG
, "BLOCK_TAG", "MMC CID: %lx %lx %lx %lx \n", g_u32_cid
[0], g_u32_cid
[1], g_u32_cid
[2], g_u32_cid
[3]);
2275 mmccid_tag_t1
= time1
;
2277 if(mmcqd_tag_t1
[idx
]==0)
2278 mmcqd_tag_t1
[idx
] = time1
;
2279 t_period
= time1
- mmcqd_tag_t1
[idx
];
2281 if(t_period
>= (unsigned long long )PRT_TIME_PERIOD
)
2283 mmcqd_read_clear
[idx
] = 2;
2284 mmcqd_work_percent
[idx
] = 1;
2285 mmcqd_r_throughput
[idx
] = 0;
2286 mmcqd_w_throughput
[idx
] = 0;
2287 t_usage
= mmcqd_t_usage_wr
[idx
] + mmcqd_t_usage_rd
[idx
];
2288 if(t_period
> t_usage
*100)
2289 xlog_printk(ANDROID_LOG_DEBUG
, "BLOCK_TAG", "mmcqd:%d Workload < 1%%, duty %lld, period %lld, req_cnt=%d \n", mmcqd
[idx
], t_usage
, t_period
, mmcqd_rq_count
[idx
]);
2292 do_div(t_period
, 100); //boundary issue
2293 t_percent
=((unsigned int)t_usage
)/((unsigned int)t_period
);
2294 mmcqd_work_percent
[idx
] = t_percent
;
2295 xlog_printk(ANDROID_LOG_DEBUG
, "BLOCK_TAG", "mmcqd:%d Workload=%d%%, duty %lld, period %lld00, req_cnt=%d \n", mmcqd
[idx
], t_percent
, t_usage
, t_period
, mmcqd_rq_count
[idx
]); //period %lld00 == period %lld x100
2297 if(mmcqd_wr_rq_count
[idx
] >= 2)
2299 diversity
= mmcqd_wr_offset
[idx
]/(mmcqd_wr_rq_count
[idx
]-1);
2300 xlog_printk(ANDROID_LOG_DEBUG
, "BLOCK_TAG", "mmcqd:%d Write Diversity=%d sectors offset, req_cnt=%d, break_cnt=%d, tract_cnt=%d, bit_cnt=%d\n", mmcqd
[idx
], diversity
, mmcqd_wr_rq_count
[idx
], mmcqd_wr_break
[idx
], mmcqd_wr_tract
[idx
], mmcqd_wr_bit
[idx
]);
2302 if(mmcqd_rd_rq_count
[idx
] >= 2)
2304 diversity
= mmcqd_rd_offset
[idx
]/(mmcqd_rd_rq_count
[idx
]-1);
2305 xlog_printk(ANDROID_LOG_DEBUG
, "BLOCK_TAG", "mmcqd:%d Read Diversity=%d sectors offset, req_cnt=%d, break_cnt=%d, tract_cnt=%d, bit_cnt=%d\n", mmcqd
[idx
], diversity
, mmcqd_rd_rq_count
[idx
], mmcqd_rd_break
[idx
], mmcqd_rd_tract
[idx
], mmcqd_rd_bit
[idx
]);
2307 if(mmcqd_t_usage_wr
[idx
])
2309 do_div(mmcqd_t_usage_wr
[idx
], 1000000); //boundary issue
2310 if(mmcqd_t_usage_wr
[idx
]) // discard print if duration will <1ms
2312 perf_meter
= (mmcqd_rq_size_wr
[idx
])/((unsigned int)mmcqd_t_usage_wr
[idx
]); //kb/s
2313 mmcqd_w_throughput
[idx
] = perf_meter
;
2314 xlog_printk(ANDROID_LOG_DEBUG
, "BLOCK_TAG", "mmcqd:%d Write Throughput=%d kB/s, size: %d bytes, time:%lld ms\n", mmcqd
[idx
], perf_meter
, mmcqd_rq_size_wr
[idx
], mmcqd_t_usage_wr
[idx
]);
2317 if(mmcqd_t_usage_rd
[idx
])
2319 do_div(mmcqd_t_usage_rd
[idx
], 1000000); //boundary issue
2320 if(mmcqd_t_usage_rd
[idx
]) // discard print if duration will <1ms
2322 perf_meter
= (mmcqd_rq_size_rd
[idx
])/((unsigned int)mmcqd_t_usage_rd
[idx
]); //kb/s
2323 mmcqd_r_throughput
[idx
] = perf_meter
;
2324 xlog_printk(ANDROID_LOG_DEBUG
, "BLOCK_TAG", "mmcqd:%d Read Throughput=%d kB/s, size: %d bytes, time:%lld ms\n", mmcqd
[idx
], perf_meter
, mmcqd_rq_size_rd
[idx
], mmcqd_t_usage_rd
[idx
]);
2327 mmcqd_tag_t1
[idx
]=time1
;
#ifdef FEATURE_STORAGE_META_LOG
		mmcmetaindex = mmc_get_devidx(md->disk);
		xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd metarw WR:%d NWR:%d HR:%d WDR:%d HDR:%d WW:%d NWW:%d HW:%d\n",
				metadata_logger[mmcmetaindex].metadata_rw_logger[0], metadata_logger[mmcmetaindex].metadata_rw_logger[1],
				metadata_logger[mmcmetaindex].metadata_rw_logger[2], metadata_logger[mmcmetaindex].metadata_rw_logger[3],
				metadata_logger[mmcmetaindex].metadata_rw_logger[4], metadata_logger[mmcmetaindex].metadata_rw_logger[5],
				metadata_logger[mmcmetaindex].metadata_rw_logger[6], metadata_logger[mmcmetaindex].metadata_rw_logger[7]);
		clear_metadata_rw_status(md->disk->first_minor);
#endif
#if defined(FEATURE_STORAGE_PID_LOGGER)
		do {
			int i;

			for (index = 0; index < PID_ID_CNT; index++) {
				if (g_pid_logger[index].current_pid != 0 && g_pid_logger[index].current_pid == mmcqd_pid)
					break;
			}
			if (index == PID_ID_CNT)
				break;
			for (i = 0; i < PID_LOGGER_COUNT; i++) {
				//printk(KERN_INFO"hank mmcqd %d %d", g_pid_logger[index].pid_logger[i], mmcqd_pid);
				if (g_pid_logger[index].pid_logger[i] == 0)
					break;
				sprintf(g_pid_logger[index].pid_buffer + i * 37, "{%05d:%05d:%08d:%05d:%08d}", g_pid_logger[index].pid_logger[i], g_pid_logger[index].pid_logger_counter[i], g_pid_logger[index].pid_logger_length[i], g_pid_logger[index].pid_logger_r_counter[i], g_pid_logger[index].pid_logger_r_length[i]);
			}
			if (i != 0) {
				xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd pid:%d %s\n", g_pid_logger[index].current_pid, g_pid_logger[index].pid_buffer);
				//xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "sizeof(&(g_pid_logger[index].pid_logger)):%d\n", sizeof(unsigned short)*PID_LOGGER_COUNT);
				//memset( &(g_pid_logger[index].pid_logger), 0, sizeof(struct struct_pid_logger)-(unsigned long)&(((struct struct_pid_logger *)0)->pid_logger));
				memset(&(g_pid_logger[index].pid_logger), 0, sizeof(unsigned short) * PID_LOGGER_COUNT);
				memset(&(g_pid_logger[index].pid_logger_counter), 0, sizeof(unsigned short) * PID_LOGGER_COUNT);
				memset(&(g_pid_logger[index].pid_logger_length), 0, sizeof(unsigned int) * PID_LOGGER_COUNT);
				memset(&(g_pid_logger[index].pid_logger_r_counter), 0, sizeof(unsigned short) * PID_LOGGER_COUNT);
				memset(&(g_pid_logger[index].pid_logger_r_length), 0, sizeof(unsigned int) * PID_LOGGER_COUNT);
				g_pid_logger[index].pid_buffer[0] = '\0';
			}
		} while (0);
#endif
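		/*
		 * Layout note for the pid_buffer records above: each sprintf()
		 * emits exactly 37 characters ("{%05d:%05d:%08d:%05d:%08d}" is
		 * 2 braces + 4 colons + 5+5+8+5+8 digits), which is why record
		 * i starts at offset i * 37. The fields are the process id and,
		 * as the field names suggest, write count, written length,
		 * read count and read length.
		 */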
#if defined(FEATURE_STORAGE_VMSTAT_LOGGER)
		xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "vmstat (FP:%ld)(FD:%ld)(ND:%ld)(WB:%ld)(NW:%ld)\n",
				((global_page_state(NR_FILE_PAGES)) << (PAGE_SHIFT - 10)),
				((global_page_state(NR_FILE_DIRTY)) << (PAGE_SHIFT - 10)),
				((global_page_state(NR_DIRTIED)) << (PAGE_SHIFT - 10)),
				((global_page_state(NR_WRITEBACK)) << (PAGE_SHIFT - 10)),
				((global_page_state(NR_WRITTEN)) << (PAGE_SHIFT - 10)));
#endif
	}
	if (rqc) {
		rq_byte = blk_rq_bytes(rqc);
		rq_sector = blk_rq_sectors(rqc);
		if (rq_data_dir(rqc) == WRITE) {
			if (mmcqd_wr_offset_tag[idx] > 0) {
				sect_offset = abs(blk_rq_pos(rqc) - mmcqd_wr_offset_tag[idx]);
				mmcqd_wr_offset[idx] += sect_offset;
				if (sect_offset == 1)
					mmcqd_wr_break[idx]++;
			}
			mmcqd_wr_offset_tag[idx] = blk_rq_pos(rqc) + rq_sector;
			if (rq_sector <= 1)	//512 bytes
				mmcqd_wr_bit[idx]++;
			else if (rq_sector >= 1016)	//508kB
				mmcqd_wr_tract[idx]++;
		} else {	//read
			if (mmcqd_rd_offset_tag[idx] > 0) {
				sect_offset = abs(blk_rq_pos(rqc) - mmcqd_rd_offset_tag[idx]);
				mmcqd_rd_offset[idx] += sect_offset;
				if (sect_offset == 1)
					mmcqd_rd_break[idx]++;
			}
			mmcqd_rd_offset_tag[idx] = blk_rq_pos(rqc) + rq_sector;
			if (rq_sector <= 1)	//512 bytes
				mmcqd_rd_bit[idx]++;
			else if (rq_sector >= 1016)	//508kB
				mmcqd_rd_tract[idx]++;
		}
	}
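	/*
	 * Worked example of the "diversity" bookkeeping above (illustrative
	 * numbers): writes at sectors 1000, 1008 and 4000, each 8 sectors
	 * long, record offset tags 1008, 1016 and 4008; the accumulated gaps
	 * are |1008 - 1008| = 0 and |4000 - 1016| = 2984, so the periodic
	 * dump prints diversity = 2984 / (3 - 1) = 1492 sectors. A purely
	 * sequential stream thus reports a diversity near 0, random I/O a
	 * large one.
	 */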
#endif

	do {
		if (rqc) {
			/*
			 * When the 4KB native sector size is enabled, only
			 * reads and writes that are a multiple of 8 blocks
			 * are allowed.
			 */
			if ((brq->data.blocks & 0x07) &&
			    (card->ext_csd.data_sector_size == 4096)) {
				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
					req->rq_disk->disk_name);
				mq_rq = mq->mqrq_cur;
				goto cmd_abort;
			}

			if (reqs >= packed_nr)
				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
							    card, mq);
			else
				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			areq = &mq->mqrq_cur->mmc_active;
		} else
			areq = NULL;
		areq = mmc_start_req(card->host, areq, (int *)&status);
		if (!areq) {
			if (status == MMC_BLK_NEW_REQUEST)
				mq->flags |= MMC_QUEUE_NEW_REQUEST;
			return 0;
		}
		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
		brq = &mq_rq->brq;
		req = mq_rq->req;
		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);

			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				ret = mmc_blk_end_packed_req(mq_rq);
				break;
			} else {
				ret = blk_end_request(req, 0,
						brq->data.bytes_xfered);
			}

//			if (card && card->host && card->host->areq)
//				met_mmc_end(card->host, card->host->areq);

			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && ret) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(req),
				       brq->data.bytes_xfered);
				rqc = NULL;
				goto cmd_abort;
			}
			break;
		case MMC_BLK_CMD_ERR:
			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
			if (mmc_blk_reset(md, card->host, type))
				goto cmd_abort;
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_RETRY:
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV ||
			    mmc_packed_cmd(mq_rq->cmd_type))
				goto cmd_abort;
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warning("%s: retrying using single block read\n",
					   req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			ret = blk_end_request(req, -EIO,
					      brq->data.blksz);
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_NOMEDIUM:
			goto cmd_abort;
		default:
			pr_err("%s: Unhandled return value (%d)",
			       req->rq_disk->disk_name, status);
			goto cmd_abort;
		}
		if (ret) {
			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				if (!mq_rq->packed->retries)
					goto cmd_abort;
				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
				mmc_start_req(card->host,
					      &mq_rq->mmc_active, NULL);
			} else {
				/*
				 * In case of an incomplete request,
				 * prepare it again and resend.
				 */
				mmc_blk_rw_rq_prep(mq_rq, card,
						   disable_multi, mq);
				mmc_start_req(card->host,
					      &mq_rq->mmc_active, NULL);
			}
		}
	} while (ret);

	return 1;
 cmd_abort:
	if (mmc_packed_cmd(mq_rq->cmd_type)) {
		mmc_blk_abort_packed_req(mq_rq);
	} else {
		if (mmc_card_removed(card))
			req->cmd_flags |= REQ_QUIET;
		while (ret)
			ret = blk_end_request(req, -EIO,
					      blk_rq_cur_bytes(req));
	}
 start_new_req:
	if (rqc) {
		if (mmc_card_removed(card)) {
			rqc->cmd_flags |= REQ_QUIET;
			blk_end_request_all(rqc, -EIO);
		} else {
			/*
			 * If the current request is packed, it needs to be
			 * put back.
			 */
			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
				mmc_blk_revert_packed_req(mq, mq->mqrq_cur);

			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			mmc_start_req(card->host,
				      &mq->mqrq_cur->mmc_active, NULL);
		}
	}

	return 0;
}
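/*
 * Illustrative sketch (not part of the driver): mmc_blk_issue_rw_rq() above
 * recovers its mmc_queue_req from the mmc_async_req pointer returned by
 * mmc_start_req() via container_of(). The toy types below are hypothetical
 * and only demonstrate that idiom; kept under #if 0 so they are not built.
 */
#if 0
struct toy_req {
	int tag;
	struct list_head node;	/* embedded member, like mmc_active above */
};

static struct toy_req *toy_req_from_node(struct list_head *n)
{
	/* subtracts offsetof(struct toy_req, node) from n */
	return container_of(n, struct toy_req, node);
}
#endif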
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	unsigned int cmd_flags = req ? req->cmd_flags : 0;

#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
	if (mmc_bus_needs_resume(card->host))
		mmc_resume_bus(card->host);
#endif

	if (req && !mq->mqrq_prev->req)
		/* claim host only for the first request */
		mmc_claim_host(card->host);

	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		if (req)
			blk_end_request_all(req, -EIO);
		ret = 0;
		goto out;
	}

	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
	if (cmd_flags & REQ_DISCARD) {
		/* complete ongoing async transfer before issuing discard */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		if (req->cmd_flags & REQ_SECURE &&
		    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (cmd_flags & REQ_FLUSH) {
		/* complete ongoing async transfer before issuing flush */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		if (!req && host->areq) {
			spin_lock_irqsave(&host->context_info.lock, flags);
			host->context_info.is_waiting_last_req = true;
			spin_unlock_irqrestore(&host->context_info.lock, flags);
		}
		ret = mmc_blk_issue_rw_rq(mq, req);
	}

out:
	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
	    (cmd_flags & MMC_REQ_SPECIAL_MASK))
		/*
		 * Release the host when there are no more requests
		 * and after a special request (discard, flush) is done.
		 * For a special request there is no reentry into
		 * mmc_blk_issue_rq() with mqrq_prev->req set.
		 */
		mmc_release_host(card->host);
	return ret;
}
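/*
 * Summary of the claim/release pairing above (inferred from this function):
 * the host is claimed when the first request of a burst arrives
 * (!mq->mqrq_prev->req) and released either once the queue drains without a
 * new asynchronous request pending, or right after a special request
 * (discard/flush), since those paths do not re-enter mmc_blk_issue_rq()
 * with mqrq_prev->req set.
 */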
static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
//#if defined(FEATURE_STORAGE_PID_LOGGER)
//extern unsigned long get_memory_size(void);

#ifdef CONFIG_MTK_EXTMEM
extern void *extmem_malloc_page_align(size_t bytes);
#endif
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, max_devices);
	if (devidx >= max_devices)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);
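	/*
	 * Example of the bitmap-based index allocation above (illustrative
	 * numbers): with CONFIG_MMC_BLOCK_MINORS = 8, max_devices is
	 * 256 / 8 = 32, so dev_use hands out indices 0..31 and the disk
	 * created below gets first_minor = devidx * 8, leaving minors for
	 * up to 7 partitions per device.
	 */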
	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * !subname implies we are creating main mmc_blk_data that will be
	 * associated with mmc_card with mmc_set_drvdata. Due to device
	 * partitions, devidx will not coincide with a per-physical card
	 * index anymore so we keep track of a name index.
	 */
	if (!subname) {
		md->name_idx = find_first_zero_bit(name_use, max_devices);
		__set_bit(md->name_idx, name_use);
	} else {
		md->name_idx = ((struct mmc_blk_data *)
				dev_to_disk(parent)->private_data)->name_idx;
	}

	md->area_type = area_type;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;
#if defined(FEATURE_STORAGE_PID_LOGGER)
	if (!page_logger) {
		//num_page_logger = sizeof(struct page_pid_logger);
		//page_logger = vmalloc(num_physpages*sizeof(struct page_pid_logger));
		// solution: use get_memory_size to obtain the size from start pfn to max pfn
		//unsigned long count = get_memory_size() >> PAGE_SHIFT;
		unsigned long count = get_max_DRAM_size() >> PAGE_SHIFT;
#ifdef CONFIG_MTK_EXTMEM
		page_logger = extmem_malloc_page_align(count * sizeof(struct page_pid_logger));
#else
		page_logger = vmalloc(count * sizeof(struct page_pid_logger));
#endif
		if (page_logger)
			memset(page_logger, -1, count * sizeof(struct page_pid_logger));
		spin_lock_init(&g_locker);
	}
#endif
#if defined(FEATURE_STORAGE_META_LOG)
	check_perdev_minors = perdev_minors;
#endif
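	/*
	 * Sizing note for the PID-logger pool above (illustrative numbers):
	 * one struct page_pid_logger entry is kept per physical page, so with
	 * 1 GiB of DRAM and 4 KiB pages, count = 262144 and the pool takes
	 * count * sizeof(struct page_pid_logger) bytes; the memset to -1
	 * marks every entry as unowned before use.
	 */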
	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	md->disk->flags = GENHD_FL_EXT_DEVT;
	if (area_type & MMC_BLK_DATA_AREA_RPMB)
		md->disk->flags |= GENHD_FL_NO_PART_SCAN;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%d%s", md->name_idx, subname ? subname : "");

	if (mmc_card_mmc(card))
		blk_queue_logical_block_size(md->queue.queue,
					     card->ext_csd.data_sector_size);
	else
		blk_queue_logical_block_size(md->queue.queue, 512);

	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if (mmc_card_mmc(card) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
	}

	if (mmc_card_mmc(card) &&
	    (area_type == MMC_BLK_DATA_AREA_MAIN) &&
	    (md->flags & MMC_BLK_CMD23) &&
	    card->ext_csd.packed_event_en) {
		if (!mmc_packed_init(&md->queue, card))
			md->flags |= MMC_BLK_PACKED_CMD;
	}

	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;
#ifdef CONFIG_MTK_EMMC_SUPPORT
	unsigned int l_reserve;
	struct storage_info s_info = {0};
#endif
	struct mmc_blk_data *md;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = card->csd.capacity << (card->csd.read_blkbits - 9);
	}

	if (!mmc_card_sd(card)) {
#ifdef CONFIG_MTK_EMMC_SUPPORT
		msdc_get_info(EMMC_CARD_BOOT, EMMC_RESERVE, &s_info);
		l_reserve = s_info.emmc_reserve;
		printk("l_reserve = 0x%x\n", l_reserve);
		size -= l_reserve;	/* reserved 64MB (eMMC OTP + eMMC combo offset + reserved) */
#endif
	}

	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
					MMC_BLK_DATA_AREA_MAIN);
	return md;
}
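/*
 * Worked example of the capacity math above (illustrative values): a
 * byte-addressed card with csd.capacity = 0x2000 and read_blkbits = 11
 * (2 KiB blocks) yields size = 0x2000 << (11 - 9) = 0x8000 sectors of
 * 512 bytes, i.e. 16 MiB; block-addressed cards instead report
 * ext_csd.sectors directly in 512-byte units.
 */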
static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname,
			      int area_type)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname, area_type);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);

	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s partition %u %s\n",
		part_md->disk->disk_name, mmc_card_id(card),
		mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}
/* MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
			if (ret)
				return ret;
		}
	}

	return ret;
}
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	struct mmc_card *card;

	if (md) {
		card = md->queue.card;
		if (md->disk->flags & GENHD_FL_UP) {
			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
			    card->ext_csd.boot_ro_lockable)
				device_remove_file(disk_to_dev(md->disk),
					&md->power_ro_lock);

			/* Stop new requests from getting into the queue */
			del_gendisk(md->disk);
		}

		/* Then flush out any already in there */
		mmc_cleanup_queue(&md->queue);
		if (md->flags & MMC_BLK_PACKED_CMD)
			mmc_packed_clean(&md->queue);
		mmc_blk_put(md);
	}
}
static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;

	__clear_bit(md->name_idx, name_use);
	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}
static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;
	struct mmc_card *card = md->queue.card;

	add_disk(md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		goto force_ro_fail;

	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	    card->ext_csd.boot_ro_lockable) {
		umode_t mode;

		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
			mode = S_IRUGO;
		else
			mode = S_IRUGO | S_IWUSR;

		md->power_ro_lock.show = power_ro_lock_show;
		md->power_ro_lock.store = power_ro_lock_store;
		sysfs_attr_init(&md->power_ro_lock.attr);
		md->power_ro_lock.attr.mode = mode;
		md->power_ro_lock.attr.name =
					"ro_lock_until_next_power_on";
		ret = device_create_file(disk_to_dev(md->disk),
				&md->power_ro_lock);
		if (ret)
			goto power_ro_lock_fail;
	}
	return ret;

power_ro_lock_fail:
	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
	del_gendisk(md->disk);

	return ret;
}
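/*
 * Note on the mode selection above (a summary inferred from the code, not
 * from the eMMC specification text): when EXT_CSD_BOOT_WP_B_PWR_WP_DIS is
 * already set, power-on write protection can no longer be armed until the
 * next power cycle, so ro_lock_until_next_power_on is exposed read-only
 * (S_IRUGO); otherwise it is also writable (S_IRUGO | S_IWUSR).
 */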
#define CID_MANFID_SANDISK	0x2
#define CID_MANFID_TOSHIBA	0x11
#define CID_MANFID_MICRON	0x13
#define CID_MANFID_SAMSUNG	0x15
#define CID_MANFID_SANDISK_NEW	0x45
#define CID_MANFID_HYNIX	0x90
#define CID_MANFID_KSI		0x70
static const struct mmc_fixup blk_fixups[] =
{
	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_NEW, CID_OEMID_ANY, add_quirk,
		  MMC_QUIRK_PON),
	/*
	 * Some MMC cards experience performance degradation with CMD23
	 * instead of CMD12-bounded multiblock transfers. For now we'll
	 * black list what's bad...
	 * - Certain Toshiba cards.
	 *
	 * N.B. This doesn't affect SD cards.
	 */
	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	/*
	 * Some MMC cards need longer data read timeout than indicated in CSD.
	 */
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
		  MMC_QUIRK_LONG_READ_TIME),
	MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_LONG_READ_TIME),
	/*
	 * On these Samsung MoviNAND parts, performing secure erase or
	 * secure trim can result in unrecoverable corruption due to a
	 * firmware bug.
	 */
	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
#ifdef CONFIG_MTK_EMMC_CACHE
	/*
	 * The cache feature of some MMC cards cannot flush previously cached
	 * data by forced programming or reliable write, so the strong
	 * ordering between metadata and file data cannot be guaranteed.
	 *
	 * On Toshiba eMMC, write performance also drops once the cache
	 * feature is enabled, because flush operations waste much time.
	 */
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_DISABLE_CACHE),
#endif
	/* Trim on Hynix eMMC 4.41 can lead to boot failure. */
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_TRIM_UNSTABLE),

	/* Trim on KSI parts with PRV=0x3 leads to a write performance drop. */
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_KSI, CID_OEMID_ANY, add_quirk_mmc_ksi_v03_skip_trim,
		  MMC_QUIRK_KSI_V03_SKIP_TRIM),

	END_FIXUP
};
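/*
 * Note on how these entries match (a summary of the MMC_FIXUP() convention
 * as used above): a fixup applies when the card's CID product name,
 * manufacturer id and OEM id all match, with CID_NAME_ANY and CID_OEMID_ANY
 * acting as wildcards; the hook (add_quirk, add_quirk_mmc, ...) then ORs the
 * quirk flag into card->quirks, which mmc_blk_probe() applies via
 * mmc_fixup_device() below.
 */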
#if defined(CONFIG_MTK_EMMC_SUPPORT) && !defined(CONFIG_MTK_GPT_SCHEME_SUPPORT)
extern void emmc_create_sys_symlink(struct mmc_card *card);
#endif
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	mmc_set_drvdata(card, md);
	mmc_fixup_device(card, blk_fixups);

	printk("[%s]: %s by manufacturer settings, quirks=0x%x\n",
	       __func__, md->disk->disk_name, card->quirks);

#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
	mmc_set_bus_resume_policy(card->host, 1);
#endif
	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}
#if defined(CONFIG_MTK_EMMC_SUPPORT) && !defined(CONFIG_MTK_GPT_SCHEME_SUPPORT)
	emmc_create_sys_symlink(card);
#endif
	return 0;

 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return 0;
}
static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	mmc_blk_remove_parts(card, md);
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_release_host(card->host);
	mmc_blk_remove_req(md);
	mmc_set_drvdata(card, NULL);
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
	mmc_set_bus_resume_policy(card->host, 0);
#endif
}
#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}
static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#else
#define mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};
static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = 256 / perdev_minors;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}
module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");