1da177e4
LT
1/*
2 * Block driver for media (i.e., flash cards)
3 *
4 * Copyright 2002 Hewlett-Packard Company
979ce720 5 * Copyright 2005-2008 Pierre Ossman
1da177e4
LT
6 *
7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is
9 * preserved in its entirety in all copies and derived works.
10 *
11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13 * FITNESS FOR ANY PARTICULAR PURPOSE.
14 *
15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
16 *
17 * Author: Andrew Christian
18 * 28 May 2002
19 */
20#include <linux/moduleparam.h>
21#include <linux/module.h>
22#include <linux/init.h>
23
1da177e4
LT
24#include <linux/kernel.h>
25#include <linux/fs.h>
5a0e3ad6 26#include <linux/slab.h>
1da177e4
LT
27#include <linux/errno.h>
28#include <linux/hdreg.h>
29#include <linux/kdev_t.h>
30#include <linux/blkdev.h>
a621aaed 31#include <linux/mutex.h>
ec5a19dd 32#include <linux/scatterlist.h>
a7bbb573 33#include <linux/string_helpers.h>
cb87ea28
JC
34#include <linux/delay.h>
35#include <linux/capability.h>
36#include <linux/compat.h>
1da177e4 37
6fa3eb70
S
38#define CREATE_TRACE_POINTS
39#include <trace/events/mmc.h>
40
cb87ea28 41#include <linux/mmc/ioctl.h>
1da177e4 42#include <linux/mmc/card.h>
385e3227 43#include <linux/mmc/host.h>
da7fbe58
PO
44#include <linux/mmc/mmc.h>
45#include <linux/mmc/sd.h>
1da177e4 46
1da177e4
LT
47#include <asm/uaccess.h>
48
98ac2162 49#include "queue.h"
6fa3eb70
S
50#include <mach/mtk_meminfo.h>
51
52//add vmstat info with block tag log
53#include <linux/vmstat.h>
54#define FEATURE_STORAGE_VMSTAT_LOGGER
55
56
57#include <linux/xlog.h>
58#include <asm/div64.h>
59#include <linux/vmalloc.h>
60
61#include <linux/mmc/sd_misc.h>
62
63#define MET_USER_EVENT_SUPPORT
64#include <linux/met_drv.h>
65
66#define FEATURE_STORAGE_PERF_INDEX
67//enable storage log in user load
68#if 0
69#ifdef USER_BUILD_KERNEL
70#undef FEATURE_STORAGE_PERF_INDEX
71#endif
72#endif
1da177e4 73
6b0b6285 74MODULE_ALIAS("mmc:block");
5e71b7a6
OJ
75#ifdef MODULE_PARAM_PREFIX
76#undef MODULE_PARAM_PREFIX
77#endif
78#define MODULE_PARAM_PREFIX "mmcblk."
79
6a7a6b45
AW
80#define INAND_CMD38_ARG_EXT_CSD 113
81#define INAND_CMD38_ARG_ERASE 0x00
82#define INAND_CMD38_ARG_TRIM 0x01
83#define INAND_CMD38_ARG_SECERASE 0x80
84#define INAND_CMD38_ARG_SECTRIM1 0x81
85#define INAND_CMD38_ARG_SECTRIM2 0x88
8fee476b 86#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
6a7a6b45 87
e3dda035 88#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
ce39f9d1
SJ
89 (rq_data_dir(req) == WRITE))
90#define PACKED_CMD_VER 0x01
91#define PACKED_CMD_WR 0x02
92
5e71b7a6 93static DEFINE_MUTEX(block_mutex);
6b0b6285 94
1da177e4 95/*
5e71b7a6
OJ
96 * The defaults come from config options but can be overridden by module
97 * or bootarg options.
1da177e4 98 */
5e71b7a6 99static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
1dff3144 100
5e71b7a6
OJ
101/*
102 * We've only got one major, so number of mmcblk devices is
103 * limited to 256 / number of minors per device.
104 */
105static int max_devices;
106
107/* 256 minors, so at most 256 separate devices */
108static DECLARE_BITMAP(dev_use, 256);
f06c9153 109static DECLARE_BITMAP(name_use, 256);
1da177e4 110
1da177e4
LT
111/*
112 * There is one mmc_blk_data per slot.
113 */
114struct mmc_blk_data {
115 spinlock_t lock;
116 struct gendisk *disk;
117 struct mmc_queue queue;
371a689f 118 struct list_head part;
1da177e4 119
d0c97cfb
AW
120 unsigned int flags;
121#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
122#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
ce39f9d1 123#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
d0c97cfb 124
1da177e4 125 unsigned int usage;
a6f6c96b 126 unsigned int read_only;
371a689f 127 unsigned int part_type;
f06c9153 128 unsigned int name_idx;
67716327
AH
129 unsigned int reset_done;
130#define MMC_BLK_READ BIT(0)
131#define MMC_BLK_WRITE BIT(1)
132#define MMC_BLK_DISCARD BIT(2)
133#define MMC_BLK_SECDISCARD BIT(3)
371a689f
AW
134
135 /*
136 * Only set in main mmc_blk_data associated
137 * with the mmc_card via mmc_set_drvdata, and keeps
138 * track of the currently selected device partition.
139 */
140 unsigned int part_curr;
141 struct device_attribute force_ro;
add710ea
JR
142 struct device_attribute power_ro_lock;
143 int area_type;
1da177e4
LT
144};
145
a621aaed 146static DEFINE_MUTEX(open_lock);
1da177e4 147
ce39f9d1
SJ
148enum {
149 MMC_PACKED_NR_IDX = -1,
150 MMC_PACKED_NR_ZERO,
151 MMC_PACKED_NR_SINGLE,
152};
153
5e71b7a6
OJ
154module_param(perdev_minors, int, 0444);
155MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
156
8d1e977d
LP
157static inline int mmc_blk_part_switch(struct mmc_card *card,
158 struct mmc_blk_data *md);
159static int get_card_status(struct mmc_card *card, u32 *status, int retries);
160
6fa3eb70
S
161#ifndef CONFIG_MTK_FPGA
162#include <linux/met_ftrace_bio.h>
163#endif
164
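/*
 * Map an MMC/SD data opcode to a single character for the MET block-tag
 * trace output: 'R' for reads, 'W' for writes, 'X' for anything else.
 */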
165char mmc_get_rw_type(u32 opcode)
166{
167 switch (opcode)
168 {
169 case MMC_READ_SINGLE_BLOCK:
170 case MMC_READ_MULTIPLE_BLOCK:
171 return 'R';
172 case MMC_WRITE_BLOCK:
173 case MMC_WRITE_MULTIPLE_BLOCK:
174 return 'W';
175 default:
176 /* unknown opcode */
177 return 'X';
178 }
179}
180
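/*
 * The check_met_mmc_*_legal() helpers validate every pointer the MET
 * trace hooks are about to dereference (host, request, command, data,
 * card, disk), so a hook called with a partially built request cannot
 * oops the kernel.
 */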
181inline int check_met_mmc_async_req_legal(struct mmc_host *host, struct mmc_async_req *areq)
182{
183 int is_legal = 0;
184
185 if (!((host == NULL) || (areq == NULL) || (areq->mrq == NULL)
186 || (areq->mrq->cmd == NULL) || (areq->mrq->data == NULL)
187 || (host->card == NULL))) {
188 is_legal = 1;
189 }
190
191 return is_legal;
192}
193
194inline int check_met_mmc_blk_data_legal(struct mmc_blk_data *md)
195{
196 int is_legal = 0;
197
198 if (!((md == NULL) || (md->disk == NULL))) {
199 is_legal = 1;
200 }
201
202 return is_legal;
203}
204
205inline int check_met_mmc_req_legal(struct mmc_host *host, struct mmc_request *req)
206{
207 int is_legal = 0;
208
209 if (!((host == NULL) || (req == NULL) || (req->cmd == NULL)
210 || (req->data == NULL) || (host->card == NULL))) {
211 is_legal = 1;
212 }
213
214 return is_legal;
215}
216
217void met_mmc_insert(struct mmc_host *host, struct mmc_async_req *areq)
218{
219 struct mmc_blk_data *md;
220 char type;
221
222 if (!check_met_mmc_async_req_legal(host, areq))
223 return;
224
225 md = mmc_get_drvdata(host->card);
226 if (!check_met_mmc_blk_data_legal(md))
227 return;
228
229 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
230 if (type == 'X')
231 return;
232
233#ifndef CONFIG_MTK_FPGA
234 MET_FTRACE_PRINTK(met_mmc_insert, md, areq, type);
235#endif
236}
237
238void met_mmc_dma_map(struct mmc_host *host, struct mmc_async_req *areq)
239{
240 struct mmc_blk_data *md;
241 char type;
242
243 if (!check_met_mmc_async_req_legal(host, areq))
244 return;
245
246 md = mmc_get_drvdata(host->card);
247 if (!check_met_mmc_blk_data_legal(md))
248 return;
249
250 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
251 if (type == 'X')
252 return;
253#ifndef CONFIG_MTK_FPGA
254 MET_FTRACE_PRINTK(met_mmc_dma_map, md, areq, type);
255#endif
256}
257
258//void met_mmc_issue(struct mmc_host *host, struct mmc_async_req *areq)
259//{
260// struct mmc_blk_data *md;
261// char type;
262//
263// if (!check_met_mmc_async_req_legal(host, areq))
264// return;
265//
266// md = mmc_get_drvdata(host->card);
267//
268// type = mmc_get_rw_type(areq->mrq->cmd->opcode);
269// if (type == 'X')
270// return;
271//
272// MET_FTRACE_PRINTK(met_mmc_issue, md, areq, type);
273//}
274
275void met_mmc_issue(struct mmc_host *host, struct mmc_request *req)
276{
277 struct mmc_blk_data *md;
278 char type;
279
280 if (!check_met_mmc_req_legal(host, req))
281 return;
282
283 md = mmc_get_drvdata(host->card);
284 if (!check_met_mmc_blk_data_legal(md))
285 return;
286
287 type = mmc_get_rw_type(req->cmd->opcode);
288 if (type == 'X')
289 return;
290#ifndef CONFIG_MTK_FPGA
291 MET_FTRACE_PRINTK(met_mmc_issue, md, req, type);
292#endif
293}
294
295void met_mmc_send_cmd(struct mmc_host *host, struct mmc_command *cmd)
296{
297 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
298 char type;
299
300 type = mmc_get_rw_type(cmd->opcode);
301 if (type == 'X')
302 return;
303
304 trace_printk("%d,%d %c %d + %d [%s]\n",
305 md->disk->major, md->disk->first_minor, type,
306 cmd->arg, cmd->data->blocks,
307 current->comm);
308}
309
310void met_mmc_xfr_done(struct mmc_host *host, struct mmc_command *cmd)
311{
312 struct mmc_blk_data *md=mmc_get_drvdata(host->card);
313 char type;
314
315 type = mmc_get_rw_type(cmd->opcode);
316 if (type == 'X')
317 return;
318
319 trace_printk("%d,%d %c %d + %d [%s]\n",
320 md->disk->major, md->disk->first_minor, type,
321 cmd->arg, cmd->data->blocks,
322 current->comm);
323}
324
325void met_mmc_wait_xfr(struct mmc_host *host, struct mmc_async_req *areq)
326{
327 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
328 char type;
329
330 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
331 if (type == 'X')
332 return;
333
334 trace_printk("%d,%d %c %d + %d [%s]\n",
335 md->disk->major, md->disk->first_minor, type,
336 areq->mrq->cmd->arg, areq->mrq->data->blocks,
337 current->comm);
338
339}
340
341void met_mmc_tuning_start(struct mmc_host *host, struct mmc_command *cmd)
342{
343 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
344 char type;
345
346 type = mmc_get_rw_type(cmd->opcode);
347 if (type == 'X')
348 return;
349
350 trace_printk("%d,%d %c %d + %d [%s]\n",
351 md->disk->major, md->disk->first_minor, type,
352 cmd->arg, cmd->data->blocks,
353 current->comm);
354}
355
356void met_mmc_tuning_end(struct mmc_host *host, struct mmc_command *cmd)
357{
358 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
359 char type;
360
361 type = mmc_get_rw_type(cmd->opcode);
362 if (type == 'X')
363 return;
364
365 trace_printk("%d,%d %c %d + %d [%s]\n",
366 md->disk->major, md->disk->first_minor, type,
367 cmd->arg, cmd->data->blocks,
368 current->comm);
369}
370
371void met_mmc_complete(struct mmc_host *host, struct mmc_async_req *areq)
372{
373 struct mmc_blk_data *md;
374 char type;
375
376 if (!check_met_mmc_async_req_legal(host, areq))
377 return;
378
379 md = mmc_get_drvdata(host->card);
380 if (!check_met_mmc_blk_data_legal(md))
381 return;
382
383 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
384 if (type == 'X')
385 return;
386#ifndef CONFIG_MTK_FPGA
387 MET_FTRACE_PRINTK(met_mmc_complete, md, areq, type);
388#endif
389}
390
391void met_mmc_dma_unmap_start(struct mmc_host *host, struct mmc_async_req *areq)
392{
393 struct mmc_blk_data *md;
394 char type;
395
396 if (!check_met_mmc_async_req_legal(host, areq))
397 return;
398
399 md = mmc_get_drvdata(host->card);
400 if (!check_met_mmc_blk_data_legal(md))
401 return;
402
403 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
404 if (type == 'X')
405 return;
406#ifndef CONFIG_MTK_FPGA
407 MET_FTRACE_PRINTK(met_mmc_dma_unmap_start, md, areq, type);
408#endif
409}
410
411void met_mmc_dma_unmap_stop(struct mmc_host *host, struct mmc_async_req *areq)
412{
413 struct mmc_blk_data *md;
414 char type;
415
416 if (!check_met_mmc_async_req_legal(host, areq))
417 return;
418
419 md = mmc_get_drvdata(host->card);
420 if (!check_met_mmc_blk_data_legal(md))
421 return;
422
423 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
424 if (type == 'X')
425 return;
426#ifndef CONFIG_MTK_FPGA
427 MET_FTRACE_PRINTK(met_mmc_dma_unmap_stop, md, areq, type);
428#endif
429}
430
431void met_mmc_continue_req_end(struct mmc_host *host, struct mmc_async_req *areq)
432{
433 struct mmc_blk_data *md;
434 char type;
435
436 if (!check_met_mmc_async_req_legal(host, areq))
437 return;
438
439 md = mmc_get_drvdata(host->card);
440 if (!check_met_mmc_blk_data_legal(md))
441 return;
442
443 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
444 if (type == 'X')
445 return;
446#ifndef CONFIG_MTK_FPGA
447 MET_FTRACE_PRINTK(met_mmc_continue_req_end, md, areq, type);
448#endif
449}
450
451void met_mmc_dma_stop(struct mmc_host *host, struct mmc_async_req *areq, unsigned int bd_num)
452{
453 struct mmc_blk_data *md;
454 char type;
455
456 if (!check_met_mmc_async_req_legal(host, areq))
457 return;
458
459 md = mmc_get_drvdata(host->card);
460 if (!check_met_mmc_blk_data_legal(md))
461 return;
462
463 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
464 if (type == 'X')
465 return;
466#ifndef CONFIG_MTK_FPGA
467 MET_FTRACE_PRINTK(met_mmc_dma_stop, md, areq, type, bd_num);
468#endif
469}
470
471//void met_mmc_end(struct mmc_host *host, struct mmc_async_req *areq)
472//{
473// struct mmc_blk_data *md;
474// char type;
475//
476// if (areq && areq->mrq && host && host->card) {
477// type = mmc_get_rw_type(areq->mrq->cmd->opcode);
478// if (type == 'X')
479// return;
480//
481// md = mmc_get_drvdata(host->card);
482//
483// if (areq && areq->mrq)
484// {
485// trace_printk("%d,%d %c %d + %d [%s]\n",
486// md->disk->major, md->disk->first_minor, type,
487// areq->mrq->cmd->arg, areq->mrq->data->blocks,
488// current->comm);
489// }
490// }
491//}
492
ce39f9d1
SJ
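/*
 * Reset the packed-command bookkeeping on a queue request so it can be
 * reused for the next (packed or normal) transfer.
 */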
493static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
494{
495 struct mmc_packed *packed = mqrq->packed;
496
497 BUG_ON(!packed);
498
499 mqrq->cmd_type = MMC_PACKED_NONE;
500 packed->nr_entries = MMC_PACKED_NR_ZERO;
501 packed->idx_failure = MMC_PACKED_NR_IDX;
502 packed->retries = 0;
503 packed->blocks = 0;
504}
505
1da177e4
LT
506static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
507{
508 struct mmc_blk_data *md;
509
a621aaed 510 mutex_lock(&open_lock);
1da177e4
LT
511 md = disk->private_data;
512 if (md && md->usage == 0)
513 md = NULL;
514 if (md)
515 md->usage++;
a621aaed 516 mutex_unlock(&open_lock);
1da177e4
LT
517
518 return md;
519}
520
371a689f
AW
521static inline int mmc_get_devidx(struct gendisk *disk)
522{
6fa3eb70 523 int devidx = disk->first_minor / perdev_minors;
371a689f
AW
524 return devidx;
525}
526
1da177e4
LT
527static void mmc_blk_put(struct mmc_blk_data *md)
528{
a621aaed 529 mutex_lock(&open_lock);
1da177e4
LT
530 md->usage--;
531 if (md->usage == 0) {
371a689f 532 int devidx = mmc_get_devidx(md->disk);
5fa83ce2
AH
533 blk_cleanup_queue(md->queue.queue);
534
1dff3144
DW
535 __clear_bit(devidx, dev_use);
536
1da177e4 537 put_disk(md->disk);
1da177e4
LT
538 kfree(md);
539 }
a621aaed 540 mutex_unlock(&open_lock);
1da177e4
LT
541}
542
add710ea
JR
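/*
 * sysfs handlers for the boot-area power-on write protect: the show side
 * reports 0 (unlocked), 1 (locked until next power-on) or 2 (permanently
 * locked); the store side accepts "1" and sets the power-on write protect
 * bit via EXT_CSD_BOOT_WP.
 */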
543static ssize_t power_ro_lock_show(struct device *dev,
544 struct device_attribute *attr, char *buf)
545{
546 int ret;
547 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
548 struct mmc_card *card = md->queue.card;
549 int locked = 0;
550
551 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
552 locked = 2;
553 else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
554 locked = 1;
555
556 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
557
1ac6c9e2
TW
558 mmc_blk_put(md);
559
add710ea
JR
560 return ret;
561}
562
563static ssize_t power_ro_lock_store(struct device *dev,
564 struct device_attribute *attr, const char *buf, size_t count)
565{
566 int ret;
567 struct mmc_blk_data *md, *part_md;
568 struct mmc_card *card;
569 unsigned long set;
570
571 if (kstrtoul(buf, 0, &set))
572 return -EINVAL;
573
574 if (set != 1)
575 return count;
576
577 md = mmc_blk_get(dev_to_disk(dev));
578 card = md->queue.card;
579
580 mmc_claim_host(card->host);
581
582 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
583 card->ext_csd.boot_ro_lock |
584 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
585 card->ext_csd.part_time);
586 if (ret)
587 pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
588 else
589 card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
590
591 mmc_release_host(card->host);
592
593 if (!ret) {
594 pr_info("%s: Locking boot partition ro until next power on\n",
595 md->disk->disk_name);
596 set_disk_ro(md->disk, 1);
597
598 list_for_each_entry(part_md, &md->part, part)
599 if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
600 pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
601 set_disk_ro(part_md->disk, 1);
602 }
603 }
604
605 mmc_blk_put(md);
606 return count;
607}
608
371a689f
AW
609static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
610 char *buf)
611{
612 int ret;
613 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
614
5d316a31 615 ret = snprintf(buf, PAGE_SIZE, "%d\n",
371a689f
AW
616 get_disk_ro(dev_to_disk(dev)) ^
617 md->read_only);
618 mmc_blk_put(md);
619 return ret;
620}
621
622static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
623 const char *buf, size_t count)
624{
625 int ret;
626 char *end;
627 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
628 unsigned long set = simple_strtoul(buf, &end, 0);
629 if (end == buf) {
630 ret = -EINVAL;
631 goto out;
632 }
633
634 set_disk_ro(dev_to_disk(dev), set || md->read_only);
635 ret = count;
636out:
637 mmc_blk_put(md);
638 return ret;
639}
640
a5a1561f 641static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
1da177e4 642{
a5a1561f 643 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
1da177e4
LT
644 int ret = -ENXIO;
645
2a48fc0a 646 mutex_lock(&block_mutex);
1da177e4
LT
647 if (md) {
648 if (md->usage == 2)
a5a1561f 649 check_disk_change(bdev);
1da177e4 650 ret = 0;
a00fc090 651
a5a1561f 652 if ((mode & FMODE_WRITE) && md->read_only) {
70bb0896 653 mmc_blk_put(md);
a00fc090 654 ret = -EROFS;
70bb0896 655 }
1da177e4 656 }
2a48fc0a 657 mutex_unlock(&block_mutex);
1da177e4
LT
658
659 return ret;
660}
661
db2a144b 662static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
1da177e4 663{
a5a1561f 664 struct mmc_blk_data *md = disk->private_data;
1da177e4 665
2a48fc0a 666 mutex_lock(&block_mutex);
1da177e4 667 mmc_blk_put(md);
2a48fc0a 668 mutex_unlock(&block_mutex);
1da177e4
LT
669}
670
671static int
a885c8c4 672mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1da177e4 673{
a885c8c4
CH
674 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
675 geo->heads = 4;
676 geo->sectors = 16;
677 return 0;
1da177e4
LT
678}
679
cb87ea28
JC
680struct mmc_blk_ioc_data {
681 struct mmc_ioc_cmd ic;
682 unsigned char *buf;
683 u64 buf_bytes;
684};
685
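/*
 * Copy an mmc_ioc_cmd request from user space, along with its data buffer
 * (when blksz * blocks is non-zero), into a freshly allocated
 * mmc_blk_ioc_data. Returns an ERR_PTR on failure.
 */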
686static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
687 struct mmc_ioc_cmd __user *user)
688{
689 struct mmc_blk_ioc_data *idata;
690 int err;
691
692 idata = kzalloc(sizeof(*idata), GFP_KERNEL);
693 if (!idata) {
694 err = -ENOMEM;
aea253ec 695 goto out;
cb87ea28
JC
696 }
697
698 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
699 err = -EFAULT;
aea253ec 700 goto idata_err;
cb87ea28
JC
701 }
702
703 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
704 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
705 err = -EOVERFLOW;
aea253ec 706 goto idata_err;
cb87ea28
JC
707 }
708
4d6144de
JR
709 if (!idata->buf_bytes)
710 return idata;
711
cb87ea28
JC
712 idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
713 if (!idata->buf) {
714 err = -ENOMEM;
aea253ec 715 goto idata_err;
cb87ea28
JC
716 }
717
718 if (copy_from_user(idata->buf, (void __user *)(unsigned long)
719 idata->ic.data_ptr, idata->buf_bytes)) {
720 err = -EFAULT;
721 goto copy_err;
722 }
723
724 return idata;
725
726copy_err:
727 kfree(idata->buf);
aea253ec 728idata_err:
cb87ea28 729 kfree(idata);
aea253ec 730out:
cb87ea28 731 return ERR_PTR(err);
cb87ea28
JC
732}
733
8d1e977d
LP
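/*
 * Poll CMD13 (SEND_STATUS) until the card reports no error bits and has
 * left the programming state, or until retries_max attempts have been
 * made. Used to make sure an RPMB operation has really finished.
 */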
734static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
735 u32 retries_max)
736{
737 int err;
738 u32 retry_count = 0;
739
740 if (!status || !retries_max)
741 return -EINVAL;
742
743 do {
744 err = get_card_status(card, status, 5);
745 if (err)
746 break;
747
748 if (!R1_STATUS(*status) &&
749 (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
750 break; /* RPMB programming operation complete */
751
752 /*
753 * Reschedule to give the MMC device a chance to continue
754 * processing the previous command without being polled too
755 * frequently.
756 */
757 usleep_range(1000, 5000);
758 } while (++retry_count < retries_max);
759
760 if (retry_count == retries_max)
761 err = -EPERM;
762
763 return err;
764}
765
cb87ea28
JC
766static int mmc_blk_ioctl_cmd(struct block_device *bdev,
767 struct mmc_ioc_cmd __user *ic_ptr)
768{
769 struct mmc_blk_ioc_data *idata;
770 struct mmc_blk_data *md;
771 struct mmc_card *card;
772 struct mmc_command cmd = {0};
773 struct mmc_data data = {0};
ad5fd972 774 struct mmc_request mrq = {NULL};
cb87ea28
JC
775 struct scatterlist sg;
776 int err;
8d1e977d
LP
777 int is_rpmb = false;
778 u32 status = 0;
cb87ea28
JC
779
780 /*
781 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
782 * whole block device, not on a partition. This prevents overspray
783 * between sibling partitions.
784 */
785 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
786 return -EPERM;
787
788 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
789 if (IS_ERR(idata))
790 return PTR_ERR(idata);
791
cb87ea28
JC
792 md = mmc_blk_get(bdev->bd_disk);
793 if (!md) {
794 err = -EINVAL;
1c02f000 795 goto cmd_err;
cb87ea28
JC
796 }
797
8d1e977d
LP
798 if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
799 is_rpmb = true;
800
cb87ea28
JC
801 card = md->queue.card;
802 if (IS_ERR(card)) {
803 err = PTR_ERR(card);
804 goto cmd_done;
805 }
806
4d6144de
JR
807 cmd.opcode = idata->ic.opcode;
808 cmd.arg = idata->ic.arg;
809 cmd.flags = idata->ic.flags;
810
811 if (idata->buf_bytes) {
812 data.sg = &sg;
813 data.sg_len = 1;
814 data.blksz = idata->ic.blksz;
815 data.blocks = idata->ic.blocks;
816
817 sg_init_one(data.sg, idata->buf, idata->buf_bytes);
818
819 if (idata->ic.write_flag)
820 data.flags = MMC_DATA_WRITE;
821 else
822 data.flags = MMC_DATA_READ;
823
824 /* data.flags must already be set before doing this. */
825 mmc_set_data_timeout(&data, card);
826
827 /* Allow overriding the timeout_ns for empirical tuning. */
828 if (idata->ic.data_timeout_ns)
829 data.timeout_ns = idata->ic.data_timeout_ns;
830
831 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
832 /*
833 * Pretend this is a data transfer and rely on the
834 * host driver to compute timeout. When all host
835 * drivers support cmd.cmd_timeout for R1B, this
836 * can be changed to:
837 *
838 * mrq.data = NULL;
839 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
840 */
841 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
842 }
843
844 mrq.data = &data;
845 }
846
847 mrq.cmd = &cmd;
848
cb87ea28
JC
849 mmc_claim_host(card->host);
850
8d1e977d
LP
851 err = mmc_blk_part_switch(card, md);
852 if (err)
853 goto cmd_rel_host;
854
cb87ea28
JC
855 if (idata->ic.is_acmd) {
856 err = mmc_app_cmd(card->host, card);
857 if (err)
858 goto cmd_rel_host;
859 }
860
8d1e977d
LP
861 if (is_rpmb) {
862 err = mmc_set_blockcount(card, data.blocks,
863 idata->ic.write_flag & (1 << 31));
864 if (err)
865 goto cmd_rel_host;
866 }
867
cb87ea28
JC
868 mmc_wait_for_req(card->host, &mrq);
869
870 if (cmd.error) {
871 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
872 __func__, cmd.error);
873 err = cmd.error;
874 goto cmd_rel_host;
875 }
876 if (data.error) {
877 dev_err(mmc_dev(card->host), "%s: data error %d\n",
878 __func__, data.error);
879 err = data.error;
880 goto cmd_rel_host;
881 }
882
883 /*
884 * According to the SD specs, some commands require a delay after
885 * issuing the command.
886 */
887 if (idata->ic.postsleep_min_us)
888 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
889
890 if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
891 err = -EFAULT;
892 goto cmd_rel_host;
893 }
894
895 if (!idata->ic.write_flag) {
896 if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
897 idata->buf, idata->buf_bytes)) {
898 err = -EFAULT;
899 goto cmd_rel_host;
900 }
901 }
902
8d1e977d
LP
903 if (is_rpmb) {
904 /*
905 * Ensure RPMB command has completed by polling CMD13
906 * "Send Status".
907 */
908 err = ioctl_rpmb_card_status_poll(card, &status, 5);
909 if (err)
910 dev_err(mmc_dev(card->host),
911 "%s: Card Status=0x%08X, error %d\n",
912 __func__, status, err);
913 }
914
cb87ea28
JC
915cmd_rel_host:
916 mmc_release_host(card->host);
917
918cmd_done:
919 mmc_blk_put(md);
1c02f000 920cmd_err:
cb87ea28
JC
921 kfree(idata->buf);
922 kfree(idata);
923 return err;
924}
925
926static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
927 unsigned int cmd, unsigned long arg)
928{
929 int ret = -EINVAL;
930 if (cmd == MMC_IOC_CMD)
931 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
932 return ret;
933}
934
935#ifdef CONFIG_COMPAT
936static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
937 unsigned int cmd, unsigned long arg)
938{
939 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
940}
941#endif
942
83d5cde4 943static const struct block_device_operations mmc_bdops = {
a5a1561f
AV
944 .open = mmc_blk_open,
945 .release = mmc_blk_release,
a885c8c4 946 .getgeo = mmc_blk_getgeo,
1da177e4 947 .owner = THIS_MODULE,
cb87ea28
JC
948 .ioctl = mmc_blk_ioctl,
949#ifdef CONFIG_COMPAT
950 .compat_ioctl = mmc_blk_compat_ioctl,
951#endif
1da177e4
LT
952};
953
371a689f
AW
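/*
 * Select the hardware partition (user area, boot, RPMB, GP) that backs
 * this mmc_blk_data by rewriting the access bits of EXT_CSD_PART_CONFIG.
 * A no-op when the requested partition is already selected.
 */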
954static inline int mmc_blk_part_switch(struct mmc_card *card,
955 struct mmc_blk_data *md)
956{
957 int ret;
958 struct mmc_blk_data *main_md = mmc_get_drvdata(card);
0d7d85ca 959
371a689f
AW
960 if (main_md->part_curr == md->part_type)
961 return 0;
962
963 if (mmc_card_mmc(card)) {
0d7d85ca
AH
964 u8 part_config = card->ext_csd.part_config;
965
966 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
967 part_config |= md->part_type;
371a689f
AW
968
969 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
0d7d85ca 970 EXT_CSD_PART_CONFIG, part_config,
371a689f
AW
971 card->ext_csd.part_time);
972 if (ret)
973 return ret;
0d7d85ca
AH
974
975 card->ext_csd.part_config = part_config;
67716327 976 }
371a689f
AW
977
978 main_md->part_curr = md->part_type;
979 return 0;
980}
981
ec5a19dd
PO
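/*
 * Ask an SD card how many blocks were written without error, using
 * ACMD22 (SEND_NUM_WR_BLKS). Returns (u32)-1 if the query fails.
 */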
982static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
983{
984 int err;
051913da
BD
985 u32 result;
986 __be32 *blocks;
ec5a19dd 987
ad5fd972 988 struct mmc_request mrq = {NULL};
1278dba1 989 struct mmc_command cmd = {0};
a61ad2b4 990 struct mmc_data data = {0};
ec5a19dd
PO
991
992 struct scatterlist sg;
993
ec5a19dd
PO
994 cmd.opcode = MMC_APP_CMD;
995 cmd.arg = card->rca << 16;
7213d175 996 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
ec5a19dd
PO
997
998 err = mmc_wait_for_cmd(card->host, &cmd, 0);
7213d175
DB
999 if (err)
1000 return (u32)-1;
1001 if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
ec5a19dd
PO
1002 return (u32)-1;
1003
1004 memset(&cmd, 0, sizeof(struct mmc_command));
1005
1006 cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
1007 cmd.arg = 0;
7213d175 1008 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
ec5a19dd 1009
ec5a19dd
PO
1010 data.blksz = 4;
1011 data.blocks = 1;
1012 data.flags = MMC_DATA_READ;
1013 data.sg = &sg;
1014 data.sg_len = 1;
d380443c 1015 mmc_set_data_timeout(&data, card);
ec5a19dd 1016
ec5a19dd
PO
1017 mrq.cmd = &cmd;
1018 mrq.data = &data;
1019
051913da
BD
1020 blocks = kmalloc(4, GFP_KERNEL);
1021 if (!blocks)
1022 return (u32)-1;
1023
1024 sg_init_one(&sg, blocks, 4);
ec5a19dd
PO
1025
1026 mmc_wait_for_req(card->host, &mrq);
1027
051913da
BD
1028 result = ntohl(*blocks);
1029 kfree(blocks);
1030
17b0429d 1031 if (cmd.error || data.error)
051913da 1032 result = (u32)-1;
ec5a19dd 1033
051913da 1034 return result;
ec5a19dd
PO
1035}
1036
6fa3eb70
S
1037u32 __mmc_sd_num_wr_blocks(struct mmc_card *card)
1038{
1039 return mmc_sd_num_wr_blocks(card);
1040}
1041EXPORT_SYMBOL(__mmc_sd_num_wr_blocks);
1042
a01f3ccf
RKAL
1043static int send_stop(struct mmc_card *card, u32 *status)
1044{
1045 struct mmc_command cmd = {0};
1046 int err;
1047
1048 cmd.opcode = MMC_STOP_TRANSMISSION;
1049 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1050 err = mmc_wait_for_cmd(card->host, &cmd, 5);
1051 if (err == 0)
1052 *status = cmd.resp[0];
1053 return err;
1054}
1055
0a2d4048 1056static int get_card_status(struct mmc_card *card, u32 *status, int retries)
504f191f 1057{
1278dba1 1058 struct mmc_command cmd = {0};
504f191f
AH
1059 int err;
1060
504f191f
AH
1061 cmd.opcode = MMC_SEND_STATUS;
1062 if (!mmc_host_is_spi(card->host))
1063 cmd.arg = card->rca << 16;
1064 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
0a2d4048
RKAL
1065 err = mmc_wait_for_cmd(card->host, &cmd, retries);
1066 if (err == 0)
1067 *status = cmd.resp[0];
1068 return err;
504f191f
AH
1069}
1070
a8ad82cc 1071#define ERR_NOMEDIUM 3
a01f3ccf
RKAL
1072#define ERR_RETRY 2
1073#define ERR_ABORT 1
1074#define ERR_CONTINUE 0
1075
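/*
 * Classify the failure of a single command (SET_BLOCK_COUNT or the r/w
 * command) into one of the ERR_* actions above, based on the error code
 * the host driver returned and the last known card status.
 */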
1076static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
1077 bool status_valid, u32 status)
1078{
1079 switch (error) {
1080 case -EILSEQ:
1081 /* response crc error, retry the r/w cmd */
1082 pr_err("%s: %s sending %s command, card status %#x\n",
1083 req->rq_disk->disk_name, "response CRC error",
1084 name, status);
1085 return ERR_RETRY;
1086
1087 case -ETIMEDOUT:
1088 pr_err("%s: %s sending %s command, card status %#x\n",
1089 req->rq_disk->disk_name, "timed out", name, status);
1090
1091 /* If the status cmd initially failed, retry the r/w cmd */
6fa3eb70
S
1092 if (!status_valid) {
1093 pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
a01f3ccf 1094 return ERR_RETRY;
6fa3eb70 1095 }
a01f3ccf
RKAL
1096 /*
1097 * If it was a r/w cmd crc error, or illegal command
1098 * (eg, issued in wrong state) then retry - we should
1099 * have corrected the state problem above.
1100 */
6fa3eb70
S
1101 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
1102 pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
a01f3ccf 1103 return ERR_RETRY;
6fa3eb70 1104 }
a01f3ccf
RKAL
1105
1106 /* Otherwise abort the command */
6fa3eb70 1107 pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
a01f3ccf
RKAL
1108 return ERR_ABORT;
1109
1110 default:
1111 /* We don't understand the error code the driver gave us */
1112 pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
1113 req->rq_disk->disk_name, error, status);
1114 return ERR_ABORT;
1115 }
1116}
1117
1118/*
1119 * Initial r/w and stop cmd error recovery.
1120 * We don't know whether the card received the r/w cmd or not, so try to
1121 * restore things back to a sane state. Essentially, we do this as follows:
1122 * - Obtain card status. If the first attempt to obtain card status fails,
1123 * the status word will reflect the failed status cmd, not the failed
1124 * r/w cmd. If we fail to obtain card status, it suggests we can no
1125 * longer communicate with the card.
1126 * - Check the card state. If the card received the cmd but there was a
1127 * transient problem with the response, it might still be in a data transfer
1128 * mode. Try to send it a stop command. If this fails, we can't recover.
1129 * - If the r/w cmd failed due to a response CRC error, it was probably
1130 * transient, so retry the cmd.
1131 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
1132 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
1133 * illegal cmd, retry.
1134 * Otherwise we don't understand what happened, so abort.
1135 */
1136static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
604ae797 1137 struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
a01f3ccf
RKAL
1138{
1139 bool prev_cmd_status_valid = true;
1140 u32 status, stop_status = 0;
1141 int err, retry;
1142
a8ad82cc
SRT
1143 if (mmc_card_removed(card))
1144 return ERR_NOMEDIUM;
1145
a01f3ccf
RKAL
1146 /*
1147 * Try to get card status which indicates both the card state
1148 * and why there was no response. If the first attempt fails,
1149 * we can't be sure the returned status is for the r/w command.
1150 */
1151 for (retry = 2; retry >= 0; retry--) {
1152 err = get_card_status(card, &status, 0);
1153 if (!err)
1154 break;
1155
1156 prev_cmd_status_valid = false;
1157 pr_err("%s: error %d sending status command, %sing\n",
1158 req->rq_disk->disk_name, err, retry ? "retry" : "abort");
1159 }
1160
1161 /* We couldn't get a response from the card. Give up. */
a8ad82cc
SRT
1162 if (err) {
1163 /* Check if the card is removed */
1164 if (mmc_detect_card_removed(card->host))
1165 return ERR_NOMEDIUM;
a01f3ccf 1166 return ERR_ABORT;
a8ad82cc 1167 }
a01f3ccf 1168
67716327
AH
1169 /* Flag ECC errors */
1170 if ((status & R1_CARD_ECC_FAILED) ||
1171 (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
1172 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
1173 *ecc_err = 1;
1174
604ae797
KY
1175 /* Flag General errors */
1176 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1177 if ((status & R1_ERROR) ||
1178 (brq->stop.resp[0] & R1_ERROR)) {
1179 pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
1180 req->rq_disk->disk_name, __func__,
1181 brq->stop.resp[0], status);
1182 *gen_err = 1;
1183 }
1184
a01f3ccf
RKAL
1185 /*
1186 * Check the current card state. If it is in some data transfer
1187 * mode, tell it to stop (and hopefully transition back to TRAN.)
1188 */
1189 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
1190 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
1191 err = send_stop(card, &stop_status);
6fa3eb70
S
1192 if (err) {
1193 get_card_status(card, &status, 0);
1194 if ((R1_CURRENT_STATE(status) == R1_STATE_TRAN) ||
1195 (R1_CURRENT_STATE(status) == R1_STATE_PRG)) {
1196 err = 0;
1197 stop_status = 0;
1198 pr_err("card status %#x: back in tran/prg state after failed stop\n", status);
1199 } else {
1200 pr_err("card status %#x: stop command failed\n", status);
1201 }
1202 }
a01f3ccf
RKAL
1203 if (err)
1204 pr_err("%s: error %d sending stop command\n",
1205 req->rq_disk->disk_name, err);
1206
1207 /*
1208 * If the stop cmd also timed out, the card is probably
1209 * not present, so abort. Other errors are bad news too.
1210 */
1211 if (err)
1212 return ERR_ABORT;
67716327
AH
1213 if (stop_status & R1_CARD_ECC_FAILED)
1214 *ecc_err = 1;
604ae797
KY
1215 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1216 if (stop_status & R1_ERROR) {
1217 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1218 req->rq_disk->disk_name, __func__,
1219 stop_status);
1220 *gen_err = 1;
1221 }
a01f3ccf
RKAL
1222 }
1223
1224 /* Check for set block count errors */
1225 if (brq->sbc.error)
1226 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
1227 prev_cmd_status_valid, status);
1228
1229 /* Check for r/w command errors */
1230 if (brq->cmd.error)
1231 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
1232 prev_cmd_status_valid, status);
1233
67716327
AH
1234 /* Data errors */
1235 if (!brq->stop.error)
1236 return ERR_CONTINUE;
1237
a01f3ccf
RKAL
1238 /* Now for stop errors. These aren't fatal to the transfer. */
1239 pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
1240 req->rq_disk->disk_name, brq->stop.error,
1241 brq->cmd.resp[0], status);
1242
1243 /*
1244 * Substitute in our own stop status, as this will give the error
1245 * state that occurred during execution of the r/w command.
1246 */
1247 if (stop_status) {
1248 brq->stop.resp[0] = stop_status;
1249 brq->stop.error = 0;
1250 }
1251 return ERR_CONTINUE;
1252}
1253
67716327
AH
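/*
 * Try a host-controlled hardware reset of the card and switch back to the
 * partition this request was using. The reset_done bitmask makes sure we
 * only reset once per error type until mmc_blk_reset_success() clears it.
 */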
1254static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1255 int type)
1256{
1257 int err;
1258
1259 if (md->reset_done & type)
1260 return -EEXIST;
1261
1262 md->reset_done |= type;
1263 err = mmc_hw_reset(host);
1264 /* Ensure we switch back to the correct partition */
1265 if (err != -EOPNOTSUPP) {
1266 struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
1267 int part_err;
1268
1269 main_md->part_curr = main_md->part_type;
1270 part_err = mmc_blk_part_switch(host->card, md);
1271 if (part_err) {
1272 /*
1273 * We have failed to get back into the correct
1274 * partition, so we need to abort the whole request.
1275 */
1276 return -ENODEV;
1277 }
1278 }
1279 return err;
1280}
1281
1282static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1283{
1284 md->reset_done &= ~type;
1285}
1286
6186ada9
CD
1287int mmc_access_rpmb(struct mmc_queue *mq)
1288{
1289 struct mmc_blk_data *md = mq->data;
1290 /*
1291 * If this is an RPMB partition access, return true
1292 */
1293 if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1294 return true;
1295
1296 return false;
1297}
1298
bd788c96
AH
1299static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1300{
1301 struct mmc_blk_data *md = mq->data;
1302 struct mmc_card *card = md->queue.card;
1303 unsigned int from, nr, arg;
67716327 1304 int err = 0, type = MMC_BLK_DISCARD;
bd788c96 1305
bd788c96
AH
1306 if (!mmc_can_erase(card)) {
1307 err = -EOPNOTSUPP;
1308 goto out;
1309 }
1310
1311 from = blk_rq_pos(req);
1312 nr = blk_rq_sectors(req);
1313
b3bf9153
KP
1314 if (mmc_can_discard(card))
1315 arg = MMC_DISCARD_ARG;
1316 else if (mmc_can_trim(card))
bd788c96
AH
1317 arg = MMC_TRIM_ARG;
1318 else
1319 arg = MMC_ERASE_ARG;
67716327 1320retry:
6a7a6b45
AW
1321 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1322 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1323 INAND_CMD38_ARG_EXT_CSD,
1324 arg == MMC_TRIM_ARG ?
1325 INAND_CMD38_ARG_TRIM :
1326 INAND_CMD38_ARG_ERASE,
1327 0);
1328 if (err)
1329 goto out;
1330 }
bd788c96
AH
1331 err = mmc_erase(card, from, nr, arg);
1332out:
67716327
AH
1333 if (err == -EIO && !mmc_blk_reset(md, card->host, type))
1334 goto retry;
1335 if (!err)
1336 mmc_blk_reset_success(md, type);
ecf8b5d0 1337 blk_end_request(req, err, blk_rq_bytes(req));
bd788c96 1338
bd788c96
AH
1339 return err ? 0 : 1;
1340}
1341
49804548
AH
1342static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
1343 struct request *req)
1344{
1345 struct mmc_blk_data *md = mq->data;
1346 struct mmc_card *card = md->queue.card;
28302812 1347 unsigned int from, nr, arg, trim_arg, erase_arg;
67716327 1348 int err = 0, type = MMC_BLK_SECDISCARD;
49804548 1349
d9ddd629 1350 if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
49804548
AH
1351 err = -EOPNOTSUPP;
1352 goto out;
1353 }
1354
28302812
AH
1355 from = blk_rq_pos(req);
1356 nr = blk_rq_sectors(req);
1357
d9ddd629
KP
1358 /* The sanitize operation is supported at v4.5 only */
1359 if (mmc_can_sanitize(card)) {
28302812
AH
1360 erase_arg = MMC_ERASE_ARG;
1361 trim_arg = MMC_TRIM_ARG;
1362 } else {
1363 erase_arg = MMC_SECURE_ERASE_ARG;
1364 trim_arg = MMC_SECURE_TRIM1_ARG;
d9ddd629
KP
1365 }
1366
28302812
AH
1367 if (mmc_erase_group_aligned(card, from, nr))
1368 arg = erase_arg;
1369 else if (mmc_can_trim(card))
1370 arg = trim_arg;
1371 else {
1372 err = -EINVAL;
1373 goto out;
1374 }
67716327 1375retry:
6a7a6b45
AW
1376 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1377 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1378 INAND_CMD38_ARG_EXT_CSD,
1379 arg == MMC_SECURE_TRIM1_ARG ?
1380 INAND_CMD38_ARG_SECTRIM1 :
1381 INAND_CMD38_ARG_SECERASE,
1382 0);
1383 if (err)
28302812 1384 goto out_retry;
6a7a6b45 1385 }
28302812 1386
49804548 1387 err = mmc_erase(card, from, nr, arg);
28302812
AH
1388 if (err == -EIO)
1389 goto out_retry;
1390 if (err)
1391 goto out;
1392
1393 if (arg == MMC_SECURE_TRIM1_ARG) {
6a7a6b45
AW
1394 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1395 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1396 INAND_CMD38_ARG_EXT_CSD,
1397 INAND_CMD38_ARG_SECTRIM2,
1398 0);
1399 if (err)
28302812 1400 goto out_retry;
6a7a6b45 1401 }
28302812 1402
49804548 1403 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
28302812
AH
1404 if (err == -EIO)
1405 goto out_retry;
1406 if (err)
1407 goto out;
6a7a6b45 1408 }
28302812 1409
6fa3eb70
S
1410 if (mmc_can_sanitize(card)) {
1411 trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
28302812
AH
1412 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1413 EXT_CSD_SANITIZE_START, 1, 0);
6fa3eb70
S
1414 trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
1415 }
28302812
AH
1416out_retry:
1417 if (err && !mmc_blk_reset(md, card->host, type))
67716327
AH
1418 goto retry;
1419 if (!err)
1420 mmc_blk_reset_success(md, type);
28302812 1421out:
ecf8b5d0 1422 blk_end_request(req, err, blk_rq_bytes(req));
49804548 1423
49804548
AH
1424 return err ? 0 : 1;
1425}
1426
f4c5522b
AW
1427static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
1428{
1429 struct mmc_blk_data *md = mq->data;
881d1c25
SJ
1430 struct mmc_card *card = md->queue.card;
1431 int ret = 0;
1432
1433 ret = mmc_flush_cache(card);
1434 if (ret)
1435 ret = -EIO;
f4c5522b 1436
ecf8b5d0 1437 blk_end_request_all(req, ret);
f4c5522b 1438
881d1c25 1439 return ret ? 0 : 1;
f4c5522b
AW
1440}
1441
1442/*
1443 * Reformat current write as a reliable write, supporting
1444 * both legacy and the enhanced reliable write MMC cards.
1445 * In each transfer we'll handle only as much as a single
1446 * reliable write can handle, thus finish the request in
1447 * partial completions.
1448 */
d0c97cfb
AW
1449static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
1450 struct mmc_card *card,
1451 struct request *req)
f4c5522b 1452{
f4c5522b
AW
1453 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
1454 /* Legacy mode imposes restrictions on transfers. */
1455 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
1456 brq->data.blocks = 1;
1457
1458 if (brq->data.blocks > card->ext_csd.rel_sectors)
1459 brq->data.blocks = card->ext_csd.rel_sectors;
1460 else if (brq->data.blocks < card->ext_csd.rel_sectors)
1461 brq->data.blocks = 1;
1462 }
f4c5522b
AW
1463}
1464
4c2b8f26
RKAL
1465#define CMD_ERRORS \
1466 (R1_OUT_OF_RANGE | /* Command argument out of range */ \
1467 R1_ADDRESS_ERROR | /* Misaligned address */ \
1468 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
1469 R1_WP_VIOLATION | /* Tried to write to protected block */ \
1470 R1_CC_ERROR | /* Card controller error */ \
1471 R1_ERROR) /* General/unknown error */
1472
ee8a43a5
PF
1473static int mmc_blk_err_check(struct mmc_card *card,
1474 struct mmc_async_req *areq)
d78d4a8a 1475{
ee8a43a5
PF
1476 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
1477 mmc_active);
1478 struct mmc_blk_request *brq = &mq_mrq->brq;
1479 struct request *req = mq_mrq->req;
604ae797 1480 int ecc_err = 0, gen_err = 0;
d78d4a8a
PF
1481
1482 /*
1483 * sbc.error indicates a problem with the set block count
1484 * command. No data will have been transferred.
1485 *
1486 * cmd.error indicates a problem with the r/w command. No
1487 * data will have been transferred.
1488 *
1489 * stop.error indicates a problem with the stop command. Data
1490 * may have been transferred, or may still be transferring.
1491 */
67716327
AH
1492 if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1493 brq->data.error) {
604ae797 1494 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
d78d4a8a
PF
1495 case ERR_RETRY:
1496 return MMC_BLK_RETRY;
1497 case ERR_ABORT:
1498 return MMC_BLK_ABORT;
a8ad82cc
SRT
1499 case ERR_NOMEDIUM:
1500 return MMC_BLK_NOMEDIUM;
d78d4a8a
PF
1501 case ERR_CONTINUE:
1502 break;
1503 }
1504 }
1505
1506 /*
1507 * Check for errors relating to the execution of the
1508 * initial command - such as address errors. No data
1509 * has been transferred.
1510 */
1511 if (brq->cmd.resp[0] & CMD_ERRORS) {
1512 pr_err("%s: r/w command failed, status = %#x\n",
1513 req->rq_disk->disk_name, brq->cmd.resp[0]);
1514 return MMC_BLK_ABORT;
1515 }
1516
1517 /*
1518 * Everything else is either success, or a data error of some
1519 * kind. If it was a write, we may have transitioned to
1520 * program mode, which we have to wait for it to complete.
1521 */
1522 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1523 u32 status;
8fee476b
TR
1524 unsigned long timeout;
1525
604ae797
KY
1526 /* Check stop command response */
1527 if (brq->stop.resp[0] & R1_ERROR) {
1528 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1529 req->rq_disk->disk_name, __func__,
1530 brq->stop.resp[0]);
1531 gen_err = 1;
1532 }
1533
8fee476b 1534 timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
d78d4a8a
PF
1535 do {
1536 int err = get_card_status(card, &status, 5);
1537 if (err) {
a3c76eb9 1538 pr_err("%s: error %d requesting status\n",
d78d4a8a
PF
1539 req->rq_disk->disk_name, err);
1540 return MMC_BLK_CMD_ERR;
1541 }
8fee476b 1542
604ae797
KY
1543 if (status & R1_ERROR) {
1544 pr_err("%s: %s: general error sending status command, card status %#x\n",
1545 req->rq_disk->disk_name, __func__,
1546 status);
1547 gen_err = 1;
1548 }
1549
8fee476b
TR
1550 /* Timeout if the device never becomes ready for data
1551 * and never leaves the program state.
1552 */
1553 if (time_after(jiffies, timeout)) {
1554 pr_err("%s: Card stuck in programming state!"\
1555 " %s %s\n", mmc_hostname(card->host),
1556 req->rq_disk->disk_name, __func__);
1557
1558 return MMC_BLK_CMD_ERR;
1559 }
d78d4a8a
PF
1560 /*
1561 * Some cards mishandle the status bits,
1562 * so make sure to check both the busy
1563 * indication and the card state.
1564 */
1565 } while (!(status & R1_READY_FOR_DATA) ||
1566 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
1567 }
1568
604ae797
KY
1569 /* if general error occurs, retry the write operation. */
1570 if (gen_err) {
1571 pr_warn("%s: retrying write for general error\n",
1572 req->rq_disk->disk_name);
1573 return MMC_BLK_RETRY;
1574 }
1575
d78d4a8a
PF
1576 if (brq->data.error) {
1577 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1578 req->rq_disk->disk_name, brq->data.error,
1579 (unsigned)blk_rq_pos(req),
1580 (unsigned)blk_rq_sectors(req),
1581 brq->cmd.resp[0], brq->stop.resp[0]);
1582
1583 if (rq_data_dir(req) == READ) {
67716327
AH
1584 if (ecc_err)
1585 return MMC_BLK_ECC_ERR;
d78d4a8a
PF
1586 return MMC_BLK_DATA_ERR;
1587 } else {
1588 return MMC_BLK_CMD_ERR;
1589 }
1590 }
1591
67716327
AH
1592 if (!brq->data.bytes_xfered)
1593 return MMC_BLK_RETRY;
d78d4a8a 1594
ce39f9d1
SJ
1595 if (mmc_packed_cmd(mq_mrq->cmd_type)) {
1596 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
1597 return MMC_BLK_PARTIAL;
1598 else
1599 return MMC_BLK_SUCCESS;
1600 }
1601
67716327
AH
1602 if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1603 return MMC_BLK_PARTIAL;
1604
1605 return MMC_BLK_SUCCESS;
d78d4a8a
PF
1606}
1607
ce39f9d1
SJ
1608static int mmc_blk_packed_err_check(struct mmc_card *card,
1609 struct mmc_async_req *areq)
1610{
1611 struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
1612 mmc_active);
1613 struct request *req = mq_rq->req;
1614 struct mmc_packed *packed = mq_rq->packed;
1615 int err, check, status;
1616 u8 *ext_csd;
1617
1618 BUG_ON(!packed);
1619
1620 packed->retries--;
1621 check = mmc_blk_err_check(card, areq);
1622 err = get_card_status(card, &status, 0);
1623 if (err) {
1624 pr_err("%s: error %d sending status command\n",
1625 req->rq_disk->disk_name, err);
1626 return MMC_BLK_ABORT;
1627 }
1628
1629 if (status & R1_EXCEPTION_EVENT) {
1630 ext_csd = kzalloc(512, GFP_KERNEL);
1631 if (!ext_csd) {
1632 pr_err("%s: unable to allocate buffer for ext_csd\n",
1633 req->rq_disk->disk_name);
1634 return -ENOMEM;
1635 }
1636
1637 err = mmc_send_ext_csd(card, ext_csd);
1638 if (err) {
1639 pr_err("%s: error %d sending ext_csd\n",
1640 req->rq_disk->disk_name, err);
1641 check = MMC_BLK_ABORT;
1642 goto free;
1643 }
1644
1645 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
1646 EXT_CSD_PACKED_FAILURE) &&
1647 (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1648 EXT_CSD_PACKED_GENERIC_ERROR)) {
1649 if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1650 EXT_CSD_PACKED_INDEXED_ERROR) {
1651 packed->idx_failure =
1652 ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
1653 check = MMC_BLK_PARTIAL;
1654 }
1655 pr_err("%s: packed cmd failed, nr %u, sectors %u, "
1656 "failure index: %d\n",
1657 req->rq_disk->disk_name, packed->nr_entries,
1658 packed->blocks, packed->idx_failure);
1659 }
1660free:
1661 kfree(ext_csd);
1662 }
1663
1664 return check;
1665}
1666
54d49d77
PF
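/*
 * Translate a block layer read/write request into an mmc_blk_request:
 * pick single vs. multi block commands, apply reliable-write and data-tag
 * rules, optionally fill in CMD23 (sbc), and map the scatterlist.
 */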
1667static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1668 struct mmc_card *card,
1669 int disable_multi,
1670 struct mmc_queue *mq)
1da177e4 1671{
54d49d77
PF
1672 u32 readcmd, writecmd;
1673 struct mmc_blk_request *brq = &mqrq->brq;
1674 struct request *req = mqrq->req;
1da177e4 1675 struct mmc_blk_data *md = mq->data;
4265900e 1676 bool do_data_tag;
1da177e4 1677
f4c5522b
AW
1678 /*
1679 * Reliable writes are used to implement Forced Unit Access and
e3dda035 1680 * are supported only on MMCs.
f4c5522b 1681 */
e3dda035 1682 bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
f4c5522b 1683 (rq_data_dir(req) == WRITE) &&
d0c97cfb 1684 (md->flags & MMC_BLK_REL_WR);
f4c5522b 1685
54d49d77
PF
1686 memset(brq, 0, sizeof(struct mmc_blk_request));
1687 brq->mrq.cmd = &brq->cmd;
1688 brq->mrq.data = &brq->data;
1da177e4 1689
54d49d77
PF
1690 brq->cmd.arg = blk_rq_pos(req);
1691 if (!mmc_card_blockaddr(card))
1692 brq->cmd.arg <<= 9;
1693 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1694 brq->data.blksz = 512;
1695 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1696 brq->stop.arg = 0;
1697 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1698 brq->data.blocks = blk_rq_sectors(req);
6a79e391 1699
54d49d77
PF
1700 /*
1701 * The block layer doesn't support all sector count
1702 * restrictions, so we need to be prepared for too big
1703 * requests.
1704 */
1705 if (brq->data.blocks > card->host->max_blk_count)
1706 brq->data.blocks = card->host->max_blk_count;
1da177e4 1707
2bf22b39
PW
1708 if (brq->data.blocks > 1) {
1709 /*
1710 * After a read error, we redo the request one sector
1711 * at a time in order to accurately determine which
1712 * sectors can be read successfully.
1713 */
1714 if (disable_multi)
1715 brq->data.blocks = 1;
1716
1717 /* Some controllers can't do multiblock reads due to hw bugs */
1718 if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
1719 rq_data_dir(req) == READ)
1720 brq->data.blocks = 1;
1721 }
d0c97cfb 1722
54d49d77
PF
1723 if (brq->data.blocks > 1 || do_rel_wr) {
1724 /* SPI multiblock writes terminate using a special
1725 * token, not a STOP_TRANSMISSION request.
d0c97cfb 1726 */
54d49d77
PF
1727 if (!mmc_host_is_spi(card->host) ||
1728 rq_data_dir(req) == READ)
1729 brq->mrq.stop = &brq->stop;
1730 readcmd = MMC_READ_MULTIPLE_BLOCK;
1731 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1732 } else {
1733 brq->mrq.stop = NULL;
1734 readcmd = MMC_READ_SINGLE_BLOCK;
1735 writecmd = MMC_WRITE_BLOCK;
1736 }
6fa3eb70
S
1737#ifdef CONFIG_MTK_EMMC_CACHE
1738 /* for non-cacheable system data,
1739 * the implementation of reliable write / force prg write,
1740 * must be applied with mutli write cmd
1741 * */
1742 if (mmc_card_mmc(card) && (card->ext_csd.cache_ctrl & 0x1)){
1743 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1744 }
1745#endif
54d49d77
PF
1746 if (rq_data_dir(req) == READ) {
1747 brq->cmd.opcode = readcmd;
1748 brq->data.flags |= MMC_DATA_READ;
1749 } else {
1750 brq->cmd.opcode = writecmd;
1751 brq->data.flags |= MMC_DATA_WRITE;
1752 }
d0c97cfb 1753
54d49d77
PF
1754 if (do_rel_wr)
1755 mmc_apply_rel_rw(brq, card, req);
f4c5522b 1756
4265900e
SD
1757 /*
1758 * Data tag is used only when writing metadata, to speed up the
1759 * write and any subsequent reads of that metadata.
1760 */
1761 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1762 (req->cmd_flags & REQ_META) &&
1763 (rq_data_dir(req) == WRITE) &&
1764 ((brq->data.blocks * brq->data.blksz) >=
1765 card->ext_csd.data_tag_unit_size);
1766
54d49d77
PF
1767 /*
1768 * Pre-defined multi-block transfers are preferable to
1769 * open ended-ones (and necessary for reliable writes).
1770 * However, it is not sufficient to just send CMD23,
1771 * and avoid the final CMD12, as on an error condition
1772 * CMD12 (stop) needs to be sent anyway. This, coupled
1773 * with Auto-CMD23 enhancements provided by some
1774 * hosts, means that the complexity of dealing
1775 * with this is best left to the host. If CMD23 is
1776 * supported by card and host, we'll fill sbc in and let
1777 * the host deal with handling it correctly. This means
1778 * that for hosts that don't expose MMC_CAP_CMD23, no
1779 * change of behavior will be observed.
1780 *
1781 * N.B: Some MMC cards experience perf degradation.
1782 * We'll avoid using CMD23-bounded multiblock writes for
1783 * these, while retaining features like reliable writes.
1784 */
4265900e
SD
1785 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1786 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1787 do_data_tag)) {
54d49d77
PF
1788 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1789 brq->sbc.arg = brq->data.blocks |
4265900e
SD
1790 (do_rel_wr ? (1 << 31) : 0) |
1791 (do_data_tag ? (1 << 29) : 0);
54d49d77
PF
1792 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1793 brq->mrq.sbc = &brq->sbc;
1794 }
98ccf149 1795
54d49d77
PF
1796 mmc_set_data_timeout(&brq->data, card);
1797
1798 brq->data.sg = mqrq->sg;
1799 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1800
6fa3eb70
S
1801 if (brq->data.sg_len > 1024)
1802 pr_err("%s:%d sglen = %x\n", __func__, __LINE__, brq->data.sg_len);
1803
54d49d77
PF
1804 /*
1805 * Adjust the sg list so it is the same size as the
1806 * request.
1807 */
1808 if (brq->data.blocks != blk_rq_sectors(req)) {
1809 int i, data_size = brq->data.blocks << 9;
1810 struct scatterlist *sg;
1811
1812 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1813 data_size -= sg->length;
1814 if (data_size <= 0) {
1815 sg->length += data_size;
1816 i++;
1817 break;
6a79e391 1818 }
6a79e391 1819 }
54d49d77 1820 brq->data.sg_len = i;
6fa3eb70 1821 pr_err("%s:%d sglen = %x\n", __func__, __LINE__, brq->data.sg_len);
54d49d77
PF
1822 }
1823
ee8a43a5
PF
1824 mqrq->mmc_active.mrq = &brq->mrq;
1825 mqrq->mmc_active.err_check = mmc_blk_err_check;
1826
54d49d77
PF
1827 mmc_queue_bounce_pre(mqrq);
1828}
6a79e391 1829
ce39f9d1
SJ
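/*
 * Number of scatterlist segments needed to hold the packed command header
 * (512 bytes, or 4 KiB for large-sector cards) given the queue's maximum
 * segment size.
 */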
1830static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
1831 struct mmc_card *card)
1832{
1833 unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
1834 unsigned int max_seg_sz = queue_max_segment_size(q);
1835 unsigned int len, nr_segs = 0;
1836
1837 do {
1838 len = min(hdr_sz, max_seg_sz);
1839 hdr_sz -= len;
1840 nr_segs++;
1841 } while (hdr_sz);
1842
1843 return nr_segs;
1844}
1845
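/*
 * Walk the request queue and gather consecutive write requests that can be
 * merged into a single packed WRITE command. Collected requests are moved
 * onto mqrq->packed->list; the return value is the number of requests in
 * the packed group (including the original request), or 0 if packing is
 * not possible for this request.
 */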
1846static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1847{
1848 struct request_queue *q = mq->queue;
1849 struct mmc_card *card = mq->card;
1850 struct request *cur = req, *next = NULL;
1851 struct mmc_blk_data *md = mq->data;
1852 struct mmc_queue_req *mqrq = mq->mqrq_cur;
1853 bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
1854 unsigned int req_sectors = 0, phys_segments = 0;
1855 unsigned int max_blk_count, max_phys_segs;
1856 bool put_back = true;
1857 u8 max_packed_rw = 0;
1858 u8 reqs = 0;
1859
1860 if (!(md->flags & MMC_BLK_PACKED_CMD))
1861 goto no_packed;
1862
1863 if ((rq_data_dir(cur) == WRITE) &&
1864 mmc_host_packed_wr(card->host))
1865 max_packed_rw = card->ext_csd.max_packed_writes;
1866
1867 if (max_packed_rw == 0)
1868 goto no_packed;
1869
1870 if (mmc_req_rel_wr(cur) &&
1871 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1872 goto no_packed;
1873
1874 if (mmc_large_sector(card) &&
1875 !IS_ALIGNED(blk_rq_sectors(cur), 8))
1876 goto no_packed;
1877
1878 mmc_blk_clear_packed(mqrq);
1879
1880 max_blk_count = min(card->host->max_blk_count,
1881 card->host->max_req_size >> 9);
1882 if (unlikely(max_blk_count > 0xffff))
1883 max_blk_count = 0xffff;
1884
1885 max_phys_segs = queue_max_segments(q);
1886 req_sectors += blk_rq_sectors(cur);
1887 phys_segments += cur->nr_phys_segments;
1888
1889 if (rq_data_dir(cur) == WRITE) {
1890 req_sectors += mmc_large_sector(card) ? 8 : 1;
1891 phys_segments += mmc_calc_packed_hdr_segs(q, card);
1892 }
1893
1894 do {
1895 if (reqs >= max_packed_rw - 1) {
1896 put_back = false;
1897 break;
1898 }
1899
1900 spin_lock_irq(q->queue_lock);
1901 next = blk_fetch_request(q);
1902 spin_unlock_irq(q->queue_lock);
1903 if (!next) {
1904 put_back = false;
1905 break;
1906 }
1907
1908 if (mmc_large_sector(card) &&
1909 !IS_ALIGNED(blk_rq_sectors(next), 8))
1910 break;
1911
1912 if (next->cmd_flags & REQ_DISCARD ||
1913 next->cmd_flags & REQ_FLUSH)
1914 break;
1915
1916 if (rq_data_dir(cur) != rq_data_dir(next))
1917 break;
1918
1919 if (mmc_req_rel_wr(next) &&
1920 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1921 break;
1922
1923 req_sectors += blk_rq_sectors(next);
1924 if (req_sectors > max_blk_count)
1925 break;
1926
1927 phys_segments += next->nr_phys_segments;
1928 if (phys_segments > max_phys_segs)
1929 break;
1930
1931 list_add_tail(&next->queuelist, &mqrq->packed->list);
1932 cur = next;
1933 reqs++;
1934 } while (1);
1935
1936 if (put_back) {
1937 spin_lock_irq(q->queue_lock);
1938 blk_requeue_request(q, next);
1939 spin_unlock_irq(q->queue_lock);
1940 }
1941
1942 if (reqs > 0) {
1943 list_add(&req->queuelist, &mqrq->packed->list);
1944 mqrq->packed->nr_entries = ++reqs;
1945 mqrq->packed->retries = reqs;
1946 return reqs;
1947 }
1948
1949no_packed:
1950 mqrq->cmd_type = MMC_PACKED_NONE;
1951 return 0;
1952}
1953
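/*
 * Build a packed WRITE request: the leading header block(s) carry the
 * packed command header (version, R/W flag and entry count in word 0,
 * then a CMD23 argument / start address pair per entry), followed by
 * the data of all packed requests. CMD23 sets the total block count,
 * header blocks included.
 */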
1954static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1955 struct mmc_card *card,
1956 struct mmc_queue *mq)
1957{
1958 struct mmc_blk_request *brq = &mqrq->brq;
1959 struct request *req = mqrq->req;
1960 struct request *prq;
1961 struct mmc_blk_data *md = mq->data;
1962 struct mmc_packed *packed = mqrq->packed;
1963 bool do_rel_wr, do_data_tag;
1964 u32 *packed_cmd_hdr;
1965 u8 hdr_blocks;
1966 u8 i = 1;
1967
1968 BUG_ON(!packed);
1969
1970 mqrq->cmd_type = MMC_PACKED_WRITE;
1971 packed->blocks = 0;
1972 packed->idx_failure = MMC_PACKED_NR_IDX;
1973
1974 packed_cmd_hdr = packed->cmd_hdr;
1975 memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
389ca69d
TK
1976 packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
1977 (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
ce39f9d1
SJ
1978 hdr_blocks = mmc_large_sector(card) ? 8 : 1;
1979
1980 /*
1981 * Argument for each entry of packed group
1982 */
1983 list_for_each_entry(prq, &packed->list, queuelist) {
1984 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1985 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1986 (prq->cmd_flags & REQ_META) &&
1987 (rq_data_dir(prq) == WRITE) &&
1988 ((brq->data.blocks * brq->data.blksz) >=
1989 card->ext_csd.data_tag_unit_size);
1990 /* Argument of CMD23 */
389ca69d 1991 packed_cmd_hdr[(i * 2)] = cpu_to_le32(
ce39f9d1
SJ
1992 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1993 (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
389ca69d 1994 blk_rq_sectors(prq));
ce39f9d1 1995 /* Argument of CMD18 or CMD25 */
389ca69d 1996 packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
ce39f9d1 1997 mmc_card_blockaddr(card) ?
389ca69d 1998 blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
ce39f9d1
SJ
1999 packed->blocks += blk_rq_sectors(prq);
2000 i++;
2001 }
2002
2003 memset(brq, 0, sizeof(struct mmc_blk_request));
2004 brq->mrq.cmd = &brq->cmd;
2005 brq->mrq.data = &brq->data;
2006 brq->mrq.sbc = &brq->sbc;
2007 brq->mrq.stop = &brq->stop;
2008
2009 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
2010 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
2011 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
2012
2013 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
2014 brq->cmd.arg = blk_rq_pos(req);
2015 if (!mmc_card_blockaddr(card))
2016 brq->cmd.arg <<= 9;
2017 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
2018
2019 brq->data.blksz = 512;
2020 brq->data.blocks = packed->blocks + hdr_blocks;
2021 brq->data.flags |= MMC_DATA_WRITE;
2022
2023 brq->stop.opcode = MMC_STOP_TRANSMISSION;
2024 brq->stop.arg = 0;
2025 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2026
2027 mmc_set_data_timeout(&brq->data, card);
2028
2029 brq->data.sg = mqrq->sg;
2030 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
6fa3eb70 2031 pr_err("%s: sglen = %d\n", __func__, brq->data.sg_len);
ce39f9d1
SJ
2032
2033 mqrq->mmc_active.mrq = &brq->mrq;
2034 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
2035
2036 mmc_queue_bounce_pre(mqrq);
2037}
2038
67716327
AH
2039static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
2040 struct mmc_blk_request *brq, struct request *req,
2041 int ret)
2042{
ce39f9d1
SJ
2043 struct mmc_queue_req *mq_rq;
2044 mq_rq = container_of(brq, struct mmc_queue_req, brq);
2045
67716327
AH
2046 /*
2047 * If this is an SD card and we're writing, we can first
2048 * mark the known good sectors as ok.
2049 *
2050 * If the card is not SD, we can still ok written sectors
2051 * as reported by the controller (which might be less than
2052 * the real number of written sectors, but never more).
2053 */
2054 if (mmc_card_sd(card)) {
2055 u32 blocks;
2056
2057 blocks = mmc_sd_num_wr_blocks(card);
2058 if (blocks != (u32)-1) {
ecf8b5d0 2059 ret = blk_end_request(req, 0, blocks << 9);
67716327
AH
2060 }
2061 } else {
ce39f9d1
SJ
2062 if (!mmc_packed_cmd(mq_rq->cmd_type))
2063 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
67716327
AH
2064 }
2065 return ret;
2066}
2067
ce39f9d1
SJ
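/*
 * Complete the entries of a packed request. Entries before the failed
 * index are ended successfully; from the failure point on, the remaining
 * entries are kept for a retry and the first of them becomes the current
 * request again. Returns non-zero when a retry is required.
 */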
2068static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
2069{
2070 struct request *prq;
2071 struct mmc_packed *packed = mq_rq->packed;
2072 int idx = packed->idx_failure, i = 0;
2073 int ret = 0;
2074
2075 BUG_ON(!packed);
2076
2077 while (!list_empty(&packed->list)) {
2078 prq = list_entry_rq(packed->list.next);
2079 if (idx == i) {
2080 /* retry from error index */
2081 packed->nr_entries -= idx;
2082 mq_rq->req = prq;
2083 ret = 1;
2084
2085 if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
2086 list_del_init(&prq->queuelist);
2087 mmc_blk_clear_packed(mq_rq);
2088 }
2089 return ret;
2090 }
2091 list_del_init(&prq->queuelist);
2092 blk_end_request(prq, 0, blk_rq_bytes(prq));
2093 i++;
2094 }
2095
2096 mmc_blk_clear_packed(mq_rq);
2097 return ret;
2098}
2099
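/* Fail every remaining entry of a packed request with -EIO. */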
2100static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
2101{
2102 struct request *prq;
2103 struct mmc_packed *packed = mq_rq->packed;
2104
2105 BUG_ON(!packed);
2106
2107 while (!list_empty(&packed->list)) {
2108 prq = list_entry_rq(packed->list.next);
2109 list_del_init(&prq->queuelist);
2110 blk_end_request(prq, -EIO, blk_rq_bytes(prq));
2111 }
2112
2113 mmc_blk_clear_packed(mq_rq);
2114}
2115
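/*
 * Walk the packed list backwards and requeue each entry to the block
 * layer; the first entry is only unlinked, since it remains the current
 * request.
 */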
2116static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
2117 struct mmc_queue_req *mq_rq)
2118{
2119 struct request *prq;
2120 struct request_queue *q = mq->queue;
2121 struct mmc_packed *packed = mq_rq->packed;
2122
2123 BUG_ON(!packed);
2124
2125 while (!list_empty(&packed->list)) {
2126 prq = list_entry_rq(packed->list.prev);
2127 if (prq->queuelist.prev != &packed->list) {
2128 list_del_init(&prq->queuelist);
2129 spin_lock_irq(q->queue_lock);
2130 blk_requeue_request(mq->queue, prq);
2131 spin_unlock_irq(q->queue_lock);
2132 } else {
2133 list_del_init(&prq->queuelist);
2134 }
2135 }
2136
2137 mmc_blk_clear_packed(mq_rq);
2138}
6fa3eb70
S
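/*
 * MTK block-tag instrumentation: per-mmcqd-thread counters used to log
 * workload, throughput and access-pattern statistics via the BLOCK_TAG
 * prints in mmc_blk_issue_rw_rq(). ID_CNT bounds the number of mmcqd
 * threads that can be tracked at once.
 */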
2139#if defined(FEATURE_STORAGE_PERF_INDEX)
2140#define PRT_TIME_PERIOD 500000000
2141#define UP_LIMITS_4BYTE 4294967295UL //((4*1024*1024*1024)-1)
2142#define ID_CNT 10
2143pid_t mmcqd[ID_CNT]={0};
2144bool start_async_req[ID_CNT] = {0};
2145unsigned long long start_async_req_time[ID_CNT] = {0};
2146static unsigned long long mmcqd_tag_t1[ID_CNT]={0}, mmccid_tag_t1=0;
2147unsigned long long mmcqd_t_usage_wr[ID_CNT]={0}, mmcqd_t_usage_rd[ID_CNT]={0};
2148unsigned int mmcqd_rq_size_wr[ID_CNT]={0}, mmcqd_rq_size_rd[ID_CNT]={0};
2149static unsigned int mmcqd_wr_offset_tag[ID_CNT]={0}, mmcqd_rd_offset_tag[ID_CNT]={0}, mmcqd_wr_offset[ID_CNT]={0}, mmcqd_rd_offset[ID_CNT]={0};
2150static unsigned int mmcqd_wr_bit[ID_CNT]={0},mmcqd_wr_tract[ID_CNT]={0};
2151static unsigned int mmcqd_rd_bit[ID_CNT]={0},mmcqd_rd_tract[ID_CNT]={0};
2152static unsigned int mmcqd_wr_break[ID_CNT]={0}, mmcqd_rd_break[ID_CNT]={0};
2153unsigned int mmcqd_rq_count[ID_CNT]={0}, mmcqd_wr_rq_count[ID_CNT]={0}, mmcqd_rd_rq_count[ID_CNT]={0};
2154extern u32 g_u32_cid[4];
2155#ifdef FEATURE_STORAGE_META_LOG
2156int check_perdev_minors = CONFIG_MMC_BLOCK_MINORS;
2157struct metadata_rwlogger metadata_logger[10] = {{{0}}};
2158#endif
ce39f9d1 2159
6fa3eb70
S
2160unsigned int mmcqd_work_percent[ID_CNT]={0};
2161unsigned int mmcqd_w_throughput[ID_CNT]={0};
2162unsigned int mmcqd_r_throughput[ID_CNT]={0};
2163unsigned int mmcqd_read_clear[ID_CNT]={0};
2164
2165static void g_var_clear(unsigned int idx)
2166{
2167 mmcqd_t_usage_wr[idx] = 0;
2168 mmcqd_t_usage_rd[idx] = 0;
2169 mmcqd_rq_size_wr[idx] = 0;
2170 mmcqd_rq_size_rd[idx] = 0;
2171 mmcqd_rq_count[idx] = 0;
2172 mmcqd_wr_offset[idx] = 0;
2173 mmcqd_rd_offset[idx] = 0;
2174 mmcqd_wr_break[idx] = 0;
2175 mmcqd_rd_break[idx] = 0;
2176 mmcqd_wr_tract[idx] = 0;
2177 mmcqd_wr_bit[idx] = 0;
2178 mmcqd_rd_tract[idx] = 0;
2179 mmcqd_rd_bit[idx] = 0;
2180 mmcqd_wr_rq_count[idx] = 0;
2181 mmcqd_rd_rq_count[idx] = 0;
2182}
2183
2184unsigned int find_mmcqd_index(void)
2185{
2186 pid_t mmcqd_pid=0;
2187 unsigned int idx=0;
2188 unsigned char i=0;
2189
2190 mmcqd_pid = task_pid_nr(current);
2191
2192 if (mmcqd[0] == 0) {
2193 mmcqd[0] = mmcqd_pid;
2194 start_async_req[0] = 0;
2195 }
2196
2197 for (i = 0; i < ID_CNT; i++)
2198 {
2199 if (mmcqd_pid == mmcqd[i])
2200 {
2201 idx = i;
2202 break;
2203 }
2204 if ((mmcqd[i] == 0) || (i == ID_CNT - 1))
2205 {
2206 mmcqd[i] = mmcqd_pid;
2207 start_async_req[i] = 0;
2208 idx = i;
2209 break;
2210 }
2211 }
2212 return idx;
2213}
2214
2215#endif
2216//#undef FEATURE_STORAGE_PID_LOGGER
2217#if defined(FEATURE_STORAGE_PID_LOGGER)
2218
2219struct struct_pid_logger g_pid_logger[PID_ID_CNT]={{0,0,{0},{0},{0},{0}}};
2220
2221
2222
2223unsigned char *page_logger = NULL;
2224spinlock_t g_locker;
2225
2226#endif
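/*
 * Issue a read/write request using the asynchronous request mechanism:
 * prepare the new request (packed or normal), hand it to mmc_start_req(),
 * which returns the previously started request, and then handle that
 * request's completion status (success, retry, single-block fallback or
 * abort). Called with rqc == NULL to drain the last outstanding request.
 */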
ee8a43a5 2227static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
54d49d77
PF
2228{
2229 struct mmc_blk_data *md = mq->data;
2230 struct mmc_card *card = md->queue.card;
2231 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
67716327 2232 int ret = 1, disable_multi = 0, retry = 0, type;
d78d4a8a 2233 enum mmc_blk_status status;
ee8a43a5 2234 struct mmc_queue_req *mq_rq;
a5075eb9 2235 struct request *req = rqc;
ee8a43a5 2236 struct mmc_async_req *areq;
ce39f9d1
SJ
2237 const u8 packed_nr = 2;
2238 u8 reqs = 0;
6fa3eb70
S
2239 unsigned long long time1 = 0;
2240#if defined(FEATURE_STORAGE_PERF_INDEX)
2241 pid_t mmcqd_pid=0;
2242 unsigned long long t_period=0, t_usage=0;
2243 unsigned int t_percent=0;
2244 unsigned int perf_meter=0;
2245 unsigned int rq_byte=0,rq_sector=0,sect_offset=0;
2246 unsigned int diversity=0;
2247 unsigned int idx=0;
2248#ifdef FEATURE_STORAGE_META_LOG
2249 unsigned int mmcmetaindex=0;
2250#endif
2251#endif
2252#if defined(FEATURE_STORAGE_PID_LOGGER)
2253 unsigned int index=0;
2254#endif
1da177e4 2255
ee8a43a5
PF
2256 if (!rqc && !mq->mqrq_prev->req)
2257 return 0;
6fa3eb70 2258 time1 = sched_clock();
98ccf149 2259
ce39f9d1
SJ
2260 if (rqc)
2261 reqs = mmc_blk_prep_packed_list(mq, rqc);
6fa3eb70
S
2262#if defined(FEATURE_STORAGE_PERF_INDEX)
2263 mmcqd_pid = task_pid_nr(current);
2264
2265 idx = find_mmcqd_index();
2266
2267 mmcqd_read_clear[idx] = 1;
2268 if(mmccid_tag_t1==0)
2269 mmccid_tag_t1 = time1;
2270 t_period = time1 - mmccid_tag_t1;
2271 if(t_period >= (unsigned long long )((PRT_TIME_PERIOD)*(unsigned long long )10))
2272 {
2273 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "MMC Queue Thread:%d, %d, %d, %d, %d \n", mmcqd[0], mmcqd[1], mmcqd[2], mmcqd[3], mmcqd[4]);
2274 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "MMC CID: %x %x %x %x \n", g_u32_cid[0], g_u32_cid[1], g_u32_cid[2], g_u32_cid[3]);
2275 mmccid_tag_t1 = time1;
2276 }
2277 if(mmcqd_tag_t1[idx]==0)
2278 mmcqd_tag_t1[idx] = time1;
2279 t_period = time1 - mmcqd_tag_t1[idx];
2280
2281 if(t_period >= (unsigned long long )PRT_TIME_PERIOD)
2282 {
2283 mmcqd_read_clear[idx] = 2;
2284 mmcqd_work_percent[idx] = 1;
2285 mmcqd_r_throughput[idx] = 0;
2286 mmcqd_w_throughput[idx] = 0;
2287 t_usage = mmcqd_t_usage_wr [idx] + mmcqd_t_usage_rd[idx];
2288 if(t_period > t_usage*100)
2289 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Workload < 1%%, duty %lld, period %lld, req_cnt=%d \n", mmcqd[idx], t_usage, t_period, mmcqd_rq_count[idx]);
2290 else
2291 {
2292 do_div(t_period, 100); //boundary issue
2293 t_percent =((unsigned int)t_usage)/((unsigned int)t_period);
2294 mmcqd_work_percent[idx] = t_percent;
2295 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Workload=%d%%, duty %lld, period %lld00, req_cnt=%d \n", mmcqd[idx], t_percent, t_usage, t_period, mmcqd_rq_count[idx]); //period %lld00 == period %lld x100
2296 }
2297 if(mmcqd_wr_rq_count[idx] >= 2)
2298 {
2299 diversity = mmcqd_wr_offset[idx]/(mmcqd_wr_rq_count[idx]-1);
2300 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Write Diversity=%d sectors offset, req_cnt=%d, break_cnt=%d, tract_cnt=%d, bit_cnt=%d\n", mmcqd[idx], diversity, mmcqd_wr_rq_count[idx], mmcqd_wr_break[idx], mmcqd_wr_tract[idx], mmcqd_wr_bit[idx]);
2301 }
2302 if(mmcqd_rd_rq_count[idx] >= 2)
2303 {
2304 diversity = mmcqd_rd_offset[idx]/(mmcqd_rd_rq_count[idx]-1);
2305 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Read Diversity=%d sectors offset, req_cnt=%d, break_cnt=%d, tract_cnt=%d, bit_cnt=%d\n", mmcqd[idx], diversity, mmcqd_rd_rq_count[idx], mmcqd_rd_break[idx], mmcqd_rd_tract[idx], mmcqd_rd_bit[idx]);
2306 }
2307 if(mmcqd_t_usage_wr[idx])
2308 {
2309 do_div(mmcqd_t_usage_wr[idx], 1000000); //boundary issue
2310 if (mmcqd_t_usage_wr[idx]) // skip the print if the duration is < 1 ms
2311 {
2312 perf_meter = (mmcqd_rq_size_wr[idx])/((unsigned int)mmcqd_t_usage_wr[idx]); //kb/s
2313 mmcqd_w_throughput[idx] = perf_meter;
2314 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Write Throughput=%d kB/s, size: %d bytes, time:%lld ms\n", mmcqd[idx], perf_meter, mmcqd_rq_size_wr[idx], mmcqd_t_usage_wr[idx]);
2315 }
2316 }
2317 if(mmcqd_t_usage_rd[idx])
2318 {
2319 do_div(mmcqd_t_usage_rd[idx], 1000000); //boundary issue
2320 if (mmcqd_t_usage_rd[idx]) // skip the print if the duration is < 1 ms
2321 {
2322 perf_meter = (mmcqd_rq_size_rd[idx])/((unsigned int)mmcqd_t_usage_rd[idx]); //kb/s
2323 mmcqd_r_throughput[idx] = perf_meter;
2324 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Read Throughput=%d kB/s, size: %d bytes, time:%lld ms\n", mmcqd[idx], perf_meter, mmcqd_rq_size_rd[idx], mmcqd_t_usage_rd[idx]);
2325 }
2326 }
2327 mmcqd_tag_t1[idx]=time1;
2328 g_var_clear(idx);
2329#ifdef FEATURE_STORAGE_META_LOG
2330 mmcmetaindex = mmc_get_devidx(md->disk);
2331 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd metarw WR:%d NWR:%d HR:%d WDR:%d HDR:%d WW:%d NWW:%d HW:%d\n",
2332 metadata_logger[mmcmetaindex].metadata_rw_logger[0], metadata_logger[mmcmetaindex].metadata_rw_logger[1],
2333 metadata_logger[mmcmetaindex].metadata_rw_logger[2], metadata_logger[mmcmetaindex].metadata_rw_logger[3],
2334 metadata_logger[mmcmetaindex].metadata_rw_logger[4], metadata_logger[mmcmetaindex].metadata_rw_logger[5],
2335 metadata_logger[mmcmetaindex].metadata_rw_logger[6], metadata_logger[mmcmetaindex].metadata_rw_logger[7]);
2336 clear_metadata_rw_status(md->disk->first_minor);
2337#endif
2338#if defined(FEATURE_STORAGE_PID_LOGGER)
2339 do {
2340 int i;
2341 for(index=0; index<PID_ID_CNT; index++) {
2342
2343 if( g_pid_logger[index].current_pid!=0 && g_pid_logger[index].current_pid == mmcqd_pid)
2344 break;
2345 }
2346 if( index == PID_ID_CNT )
2347 break;
2348 for( i=0; i<PID_LOGGER_COUNT; i++) {
2349 //printk(KERN_INFO"hank mmcqd %d %d", g_pid_logger[index].pid_logger[i], mmcqd_pid);
2350 if( g_pid_logger[index].pid_logger[i] == 0)
2351 break;
2352 sprintf (g_pid_logger[index].pid_buffer+i*37, "{%05d:%05d:%08d:%05d:%08d}", g_pid_logger[index].pid_logger[i], g_pid_logger[index].pid_logger_counter[i], g_pid_logger[index].pid_logger_length[i], g_pid_logger[index].pid_logger_r_counter[i], g_pid_logger[index].pid_logger_r_length[i]);
2353
2354 }
2355 if( i != 0) {
2356 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd pid:%d %s\n", g_pid_logger[index].current_pid, g_pid_logger[index].pid_buffer);
2357 //xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "sizeof(&(g_pid_logger[index].pid_logger)):%d\n", sizeof(unsigned short)*PID_LOGGER_COUNT);
2358 //memset( &(g_pid_logger[index].pid_logger), 0, sizeof(struct struct_pid_logger)-(unsigned long)&(((struct struct_pid_logger *)0)->pid_logger));
2359 memset( &(g_pid_logger[index].pid_logger), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
2360 memset( &(g_pid_logger[index].pid_logger_counter), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
2361 memset( &(g_pid_logger[index].pid_logger_length), 0, sizeof(unsigned int)*PID_LOGGER_COUNT);
2362 memset( &(g_pid_logger[index].pid_logger_r_counter), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
2363 memset( &(g_pid_logger[index].pid_logger_r_length), 0, sizeof(unsigned int)*PID_LOGGER_COUNT);
2364 memset( &(g_pid_logger[index].pid_buffer), 0, sizeof(char)*1024);
2365
2366
2367 }
2368 g_pid_logger[index].pid_buffer[0] = '\0';
2369
2370 } while(0);
2371#endif
2372
2373#if defined(FEATURE_STORAGE_VMSTAT_LOGGER)
2374 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "vmstat (FP:%ld)(FD:%ld)(ND:%ld)(WB:%ld)(NW:%ld)\n",
2375 ((global_page_state(NR_FILE_PAGES)) << (PAGE_SHIFT - 10)),
2376 ((global_page_state(NR_FILE_DIRTY)) << (PAGE_SHIFT - 10)),
2377 ((global_page_state(NR_DIRTIED)) << (PAGE_SHIFT - 10)),
2378 ((global_page_state(NR_WRITEBACK)) << (PAGE_SHIFT - 10)),
2379 ((global_page_state(NR_WRITTEN)) << (PAGE_SHIFT - 10)));
2380#endif
ce39f9d1 2381
6fa3eb70
S
2382 }
2383 if (rqc)
2384 {
2385 rq_byte = blk_rq_bytes(rqc);
2386 rq_sector = blk_rq_sectors(rqc);
2387 if (rq_data_dir(rqc) == WRITE)
2388 {
2389 if (mmcqd_wr_offset_tag[idx] > 0)
2390 {
2391 sect_offset = abs(blk_rq_pos(rqc) - mmcqd_wr_offset_tag[idx]);
2392 mmcqd_wr_offset[idx] += sect_offset;
2393 if (sect_offset == 1)
2394 mmcqd_wr_break[idx]++;
2395 }
2396 mmcqd_wr_offset_tag[idx] = blk_rq_pos(rqc) + rq_sector;
2397 if (rq_sector <= 1) // 512 bytes
2398 mmcqd_wr_bit[idx]++;
2399 else if (rq_sector >= 1016) // 508 kB
2400 mmcqd_wr_tract[idx]++;
2401 }
2402 else // read
2403 {
2404 if (mmcqd_rd_offset_tag[idx] > 0)
2405 {
2406 sect_offset = abs(blk_rq_pos(rqc) - mmcqd_rd_offset_tag[idx]);
2407 mmcqd_rd_offset[idx] += sect_offset;
2408 if (sect_offset == 1)
2409 mmcqd_rd_break[idx]++;
2410 }
2411 mmcqd_rd_offset_tag[idx] = blk_rq_pos(rqc) + rq_sector;
2412 if (rq_sector <= 1) // 512 bytes
2413 mmcqd_rd_bit[idx]++;
2414 else if (rq_sector >= 1016) // 508 kB
2415 mmcqd_rd_tract[idx]++;
2416 }
2417 }
2418#endif
ee8a43a5
PF
2419 do {
2420 if (rqc) {
a5075eb9
SD
2421 /*
2422 * When 4KB native sectors are enabled, only reads and writes
2423 * that are a multiple of 8 blocks (4KB) are allowed.
2424 */
2425 if ((brq->data.blocks & 0x07) &&
2426 (card->ext_csd.data_sector_size == 4096)) {
2427 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
2428 req->rq_disk->disk_name);
ce39f9d1 2429 mq_rq = mq->mqrq_cur;
a5075eb9
SD
2430 goto cmd_abort;
2431 }
ce39f9d1
SJ
2432
2433 if (reqs >= packed_nr)
2434 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
2435 card, mq);
2436 else
2437 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
ee8a43a5
PF
2438 areq = &mq->mqrq_cur->mmc_active;
2439 } else
2440 areq = NULL;
2441 areq = mmc_start_req(card->host, areq, (int *) &status);
2220eedf
KD
2442 if (!areq) {
2443 if (status == MMC_BLK_NEW_REQUEST)
2444 mq->flags |= MMC_QUEUE_NEW_REQUEST;
ee8a43a5 2445 return 0;
2220eedf 2446 }
ee8a43a5
PF
2447
2448 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
2449 brq = &mq_rq->brq;
2450 req = mq_rq->req;
67716327 2451 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
ee8a43a5 2452 mmc_queue_bounce_post(mq_rq);
98ccf149 2453
d78d4a8a
PF
2454 switch (status) {
2455 case MMC_BLK_SUCCESS:
2456 case MMC_BLK_PARTIAL:
2457 /*
2458 * A block was successfully transferred.
2459 */
67716327 2460 mmc_blk_reset_success(md, type);
ce39f9d1
SJ
2461
2462 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2463 ret = mmc_blk_end_packed_req(mq_rq);
2464 break;
2465 } else {
2466 ret = blk_end_request(req, 0,
d78d4a8a 2467 brq->data.bytes_xfered);
ce39f9d1
SJ
2468 }
2469
6fa3eb70
S
2470// if (card && card->host && card->host->areq)
2471// met_mmc_end(card->host, card->host->areq);
2472
67716327
AH
2473 /*
2474 * If the blk_end_request function returns non-zero even
2475 * though all data has been transferred and no errors
2476 * were returned by the host controller, it's a bug.
2477 */
ee8a43a5 2478 if (status == MMC_BLK_SUCCESS && ret) {
a3c76eb9 2479 pr_err("%s BUG rq_tot %d d_xfer %d\n",
ee8a43a5
PF
2480 __func__, blk_rq_bytes(req),
2481 brq->data.bytes_xfered);
2482 rqc = NULL;
2483 goto cmd_abort;
2484 }
d78d4a8a
PF
2485 break;
2486 case MMC_BLK_CMD_ERR:
67716327 2487 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
21dd5b3d
DW
2488 if (mmc_blk_reset(md, card->host, type))
2489 goto cmd_abort;
2490 if (!ret)
2491 goto start_new_req;
2492 break;
d78d4a8a
PF
2493 case MMC_BLK_RETRY:
2494 if (retry++ < 5)
a01f3ccf 2495 break;
67716327 2496 /* Fall through */
d78d4a8a 2497 case MMC_BLK_ABORT:
67716327
AH
2498 if (!mmc_blk_reset(md, card->host, type))
2499 break;
4c2b8f26 2500 goto cmd_abort;
67716327
AH
2501 case MMC_BLK_DATA_ERR: {
2502 int err;
2503
2504 err = mmc_blk_reset(md, card->host, type);
2505 if (!err)
2506 break;
ce39f9d1
SJ
2507 if (err == -ENODEV ||
2508 mmc_packed_cmd(mq_rq->cmd_type))
67716327
AH
2509 goto cmd_abort;
2510 /* Fall through */
2511 }
2512 case MMC_BLK_ECC_ERR:
2513 if (brq->data.blocks > 1) {
2514 /* Redo read one sector at a time */
2515 pr_warning("%s: retrying using single block read\n",
2516 req->rq_disk->disk_name);
2517 disable_multi = 1;
2518 break;
2519 }
d78d4a8a
PF
2520 /*
2521 * After an error, we redo I/O one sector at a
2522 * time, so we only reach here after trying to
2523 * read a single sector.
2524 */
ecf8b5d0 2525 ret = blk_end_request(req, -EIO,
d78d4a8a 2526 brq->data.blksz);
ee8a43a5
PF
2527 if (!ret)
2528 goto start_new_req;
d78d4a8a 2529 break;
a8ad82cc
SRT
2530 case MMC_BLK_NOMEDIUM:
2531 goto cmd_abort;
2220eedf
KD
2532 default:
2533 pr_err("%s: Unhandled return value (%d)",
2534 req->rq_disk->disk_name, status);
2535 goto cmd_abort;
4c2b8f26
RKAL
2536 }
2537
ee8a43a5 2538 if (ret) {
ce39f9d1
SJ
2539 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2540 if (!mq_rq->packed->retries)
2541 goto cmd_abort;
2542 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
2543 mmc_start_req(card->host,
2544 &mq_rq->mmc_active, NULL);
2545 } else {
2546
2547 /*
2548 * In case of an incomplete request,
2549 * prepare it again and resend.
2550 */
2551 mmc_blk_rw_rq_prep(mq_rq, card,
2552 disable_multi, mq);
2553 mmc_start_req(card->host,
2554 &mq_rq->mmc_active, NULL);
2555 }
ee8a43a5 2556 }
1da177e4
LT
2557 } while (ret);
2558
1da177e4
LT
2559 return 1;
2560
a01f3ccf 2561 cmd_abort:
ce39f9d1
SJ
2562 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2563 mmc_blk_abort_packed_req(mq_rq);
2564 } else {
2565 if (mmc_card_removed(card))
2566 req->cmd_flags |= REQ_QUIET;
2567 while (ret)
2568 ret = blk_end_request(req, -EIO,
2569 blk_rq_cur_bytes(req));
2570 }
1da177e4 2571
ee8a43a5
PF
2572 start_new_req:
2573 if (rqc) {
7a81902f
SJ
2574 if (mmc_card_removed(card)) {
2575 rqc->cmd_flags |= REQ_QUIET;
2576 blk_end_request_all(rqc, -EIO);
2577 } else {
ce39f9d1
SJ
2578 /*
2579 * If the current request is packed, it needs to be put back.
2580 */
2581 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
2582 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
2583
7a81902f
SJ
2584 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2585 mmc_start_req(card->host,
2586 &mq->mqrq_cur->mmc_active, NULL);
2587 }
ee8a43a5
PF
2588 }
2589
1da177e4
LT
2590 return 0;
2591}
2592
bd788c96
AH
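/*
 * Top-level issue function for the block queue: switch to the right card
 * partition, then dispatch the request to the (secure) discard, flush or
 * read/write path. The host is claimed for the first request of a burst
 * and released once the queue goes idle or a special (discard/flush)
 * request has completed.
 */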
2593static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2594{
1a258db6
AW
2595 int ret;
2596 struct mmc_blk_data *md = mq->data;
2597 struct mmc_card *card = md->queue.card;
2220eedf
KD
2598 struct mmc_host *host = card->host;
2599 unsigned long flags;
1e06335d 2600 unsigned int cmd_flags = req ? req->cmd_flags : 0;
1a258db6 2601
6fa3eb70
S
2602#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
2603 if (mmc_bus_needs_resume(card->host))
2604 mmc_resume_bus(card->host);
2605#endif
2606
ee8a43a5
PF
2607 if (req && !mq->mqrq_prev->req)
2608 /* claim host only for the first request */
2609 mmc_claim_host(card->host);
2610
371a689f
AW
2611 ret = mmc_blk_part_switch(card, md);
2612 if (ret) {
0d7d85ca 2613 if (req) {
ecf8b5d0 2614 blk_end_request_all(req, -EIO);
0d7d85ca 2615 }
371a689f
AW
2616 ret = 0;
2617 goto out;
2618 }
1a258db6 2619
2220eedf 2620 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
1e06335d 2621 if (cmd_flags & REQ_DISCARD) {
ee8a43a5
PF
2622 /* complete ongoing async transfer before issuing discard */
2623 if (card->host->areq)
2624 mmc_blk_issue_rw_rq(mq, NULL);
3550ccdb
IC
2625 if (req->cmd_flags & REQ_SECURE &&
2626 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
1a258db6 2627 ret = mmc_blk_issue_secdiscard_rq(mq, req);
49804548 2628 else
1a258db6 2629 ret = mmc_blk_issue_discard_rq(mq, req);
1e06335d 2630 } else if (cmd_flags & REQ_FLUSH) {
393f9a08
JC
2631 /* complete ongoing async transfer before issuing flush */
2632 if (card->host->areq)
2633 mmc_blk_issue_rw_rq(mq, NULL);
1a258db6 2634 ret = mmc_blk_issue_flush(mq, req);
49804548 2635 } else {
2220eedf
KD
2636 if (!req && host->areq) {
2637 spin_lock_irqsave(&host->context_info.lock, flags);
2638 host->context_info.is_waiting_last_req = true;
2639 spin_unlock_irqrestore(&host->context_info.lock, flags);
2640 }
1a258db6 2641 ret = mmc_blk_issue_rw_rq(mq, req);
49804548 2642 }
1a258db6 2643
371a689f 2644out:
ef3a69c7 2645 if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
1e06335d 2646 (cmd_flags & MMC_REQ_SPECIAL_MASK))
ef3a69c7
SJ
2647 /*
2648 * Release host when there are no more requests
2649 * and after a special request (discard, flush) is done.
2650 * In case of a special request, there is no reentry to
2651 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
2652 */
ee8a43a5 2653 mmc_release_host(card->host);
1a258db6 2654 return ret;
bd788c96 2655}
1da177e4 2656
a6f6c96b
RK
2657static inline int mmc_blk_readonly(struct mmc_card *card)
2658{
2659 return mmc_card_readonly(card) ||
2660 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2661}
2662
6fa3eb70
S
2663//#if defined(FEATURE_STORAGE_PID_LOGGER)
2664//extern unsigned long get_memory_size(void);
2665//#endif
2666#ifdef CONFIG_MTK_EXTMEM
2667extern void* extmem_malloc_page_align(size_t bytes);
2668#endif
371a689f
AW
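/*
 * Allocate and initialise the mmc_blk_data, gendisk and request queue
 * for one block device (the main user area or one of the card's hardware
 * partitions); mmc_add_disk() is called separately to register it.
 */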
2669static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2670 struct device *parent,
2671 sector_t size,
2672 bool default_ro,
add710ea
JR
2673 const char *subname,
2674 int area_type)
1da177e4
LT
2675{
2676 struct mmc_blk_data *md;
2677 int devidx, ret;
2678
5e71b7a6
OJ
2679 devidx = find_first_zero_bit(dev_use, max_devices);
2680 if (devidx >= max_devices)
1da177e4
LT
2681 return ERR_PTR(-ENOSPC);
2682 __set_bit(devidx, dev_use);
2683
dd00cc48 2684 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
a6f6c96b
RK
2685 if (!md) {
2686 ret = -ENOMEM;
2687 goto out;
2688 }
1da177e4 2689
f06c9153
AW
2690 /*
2691 * !subname implies we are creating main mmc_blk_data that will be
2692 * associated with mmc_card with mmc_set_drvdata. Due to device
2693 * partitions, devidx will not coincide with a per-physical card
2694 * index anymore so we keep track of a name index.
2695 */
2696 if (!subname) {
2697 md->name_idx = find_first_zero_bit(name_use, max_devices);
2698 __set_bit(md->name_idx, name_use);
add710ea 2699 } else
f06c9153
AW
2700 md->name_idx = ((struct mmc_blk_data *)
2701 dev_to_disk(parent)->private_data)->name_idx;
2702
add710ea
JR
2703 md->area_type = area_type;
2704
a6f6c96b
RK
2705 /*
2706 * Set the read-only status based on the supported commands
2707 * and the write protect switch.
2708 */
2709 md->read_only = mmc_blk_readonly(card);
1da177e4 2710
5e71b7a6 2711 md->disk = alloc_disk(perdev_minors);
a6f6c96b
RK
2712 if (md->disk == NULL) {
2713 ret = -ENOMEM;
2714 goto err_kfree;
2715 }
1da177e4 2716
a6f6c96b 2717 spin_lock_init(&md->lock);
371a689f 2718 INIT_LIST_HEAD(&md->part);
a6f6c96b 2719 md->usage = 1;
1da177e4 2720
d09408ad 2721 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
a6f6c96b
RK
2722 if (ret)
2723 goto err_putdisk;
6fa3eb70
S
2724#if defined(FEATURE_STORAGE_PID_LOGGER)
2725 if( !page_logger){
2726 //num_page_logger = sizeof(struct page_pid_logger);
2727 //page_logger = vmalloc(num_physpages*sizeof(struct page_pid_logger));
2728 // use get_max_DRAM_size to obtain the size from start pfn to max pfn
2729
2730 //unsigned long count = get_memory_size() >> PAGE_SHIFT;
2731 unsigned long count = get_max_DRAM_size() >> PAGE_SHIFT;
2732#ifdef CONFIG_MTK_EXTMEM
2733 page_logger = extmem_malloc_page_align(count * sizeof(struct page_pid_logger));
2734#else
2735 page_logger = vmalloc(count * sizeof(struct page_pid_logger));
2736#endif
2737 if( page_logger) {
2738 memset( page_logger, -1, count*sizeof( struct page_pid_logger));
2739 }
2740 spin_lock_init(&g_locker);
2741 }
2742#endif
2743#if defined(FEATURE_STORAGE_META_LOG)
2744 check_perdev_minors = perdev_minors;
2745#endif
1da177e4 2746
a6f6c96b
RK
2747 md->queue.issue_fn = mmc_blk_issue_rq;
2748 md->queue.data = md;
d2b18394 2749
fe6b4c88 2750 md->disk->major = MMC_BLOCK_MAJOR;
5e71b7a6 2751 md->disk->first_minor = devidx * perdev_minors;
a6f6c96b
RK
2752 md->disk->fops = &mmc_bdops;
2753 md->disk->private_data = md;
2754 md->disk->queue = md->queue.queue;
371a689f
AW
2755 md->disk->driverfs_dev = parent;
2756 set_disk_ro(md->disk, md->read_only || default_ro);
6fa3eb70 2757 md->disk->flags = GENHD_FL_EXT_DEVT;
53d8f974
LP
2758 if (area_type & MMC_BLK_DATA_AREA_RPMB)
2759 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
a6f6c96b
RK
2760
2761 /*
2762 * As discussed on lkml, GENHD_FL_REMOVABLE should:
2763 *
2764 * - be set for removable media with permanent block devices
2765 * - be unset for removable block devices with permanent media
2766 *
2767 * Since MMC block devices clearly fall under the second
2768 * case, we do not set GENHD_FL_REMOVABLE. Userspace
2769 * should use the block device creation/destruction hotplug
2770 * messages to tell when the card is present.
2771 */
2772
f06c9153
AW
2773 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2774 "mmcblk%d%s", md->name_idx, subname ? subname : "");
a6f6c96b 2775
a5075eb9
SD
2776 if (mmc_card_mmc(card))
2777 blk_queue_logical_block_size(md->queue.queue,
2778 card->ext_csd.data_sector_size);
2779 else
2780 blk_queue_logical_block_size(md->queue.queue, 512);
2781
371a689f 2782 set_capacity(md->disk, size);
d0c97cfb 2783
f0d89972
AW
2784 if (mmc_host_cmd23(card->host)) {
2785 if (mmc_card_mmc(card) ||
2786 (mmc_card_sd(card) &&
2787 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2788 md->flags |= MMC_BLK_CMD23;
2789 }
d0c97cfb
AW
2790
2791 if (mmc_card_mmc(card) &&
2792 md->flags & MMC_BLK_CMD23 &&
2793 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2794 card->ext_csd.rel_sectors)) {
2795 md->flags |= MMC_BLK_REL_WR;
2796 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
2797 }
2798
ce39f9d1
SJ
2799 if (mmc_card_mmc(card) &&
2800 (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2801 (md->flags & MMC_BLK_CMD23) &&
2802 card->ext_csd.packed_event_en) {
2803 if (!mmc_packed_init(&md->queue, card))
2804 md->flags |= MMC_BLK_PACKED_CMD;
2805 }
2806
371a689f
AW
2807 return md;
2808
2809 err_putdisk:
2810 put_disk(md->disk);
2811 err_kfree:
2812 kfree(md);
2813 out:
2814 return ERR_PTR(ret);
2815}
2816
2817static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2818{
2819 sector_t size;
6fa3eb70
S
2820#ifdef CONFIG_MTK_EMMC_SUPPORT
2821 unsigned int l_reserve;
2822 struct storage_info s_info = {0};
2823#endif
371a689f 2824 struct mmc_blk_data *md;
a6f6c96b 2825
85a18ad9
PO
2826 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2827 /*
2828 * The EXT_CSD sector count is in number of 512 byte
2829 * sectors.
2830 */
371a689f 2831 size = card->ext_csd.sectors;
85a18ad9
PO
2832 } else {
2833 /*
2834 * The CSD capacity field is in units of read_blkbits.
2835 * set_capacity takes units of 512 bytes.
2836 */
371a689f 2837 size = card->csd.capacity << (card->csd.read_blkbits - 9);
85a18ad9 2838 }
371a689f 2839
6fa3eb70
S
2840 if(!mmc_card_sd(card)){
2841#ifdef CONFIG_MTK_EMMC_SUPPORT
2842 msdc_get_info(EMMC_CARD_BOOT, EMMC_RESERVE, &s_info);
2843 l_reserve = s_info.emmc_reserve;
2844 printk("l_reserve = 0x%x\n", l_reserve);
2845 size -= l_reserve; /*reserved for 64MB (emmc otp + emmc combo offset + reserved)*/
2846#endif
2847 }
add710ea
JR
2848 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2849 MMC_BLK_DATA_AREA_MAIN);
1da177e4 2850 return md;
371a689f 2851}
a6f6c96b 2852
371a689f
AW
2853static int mmc_blk_alloc_part(struct mmc_card *card,
2854 struct mmc_blk_data *md,
2855 unsigned int part_type,
2856 sector_t size,
2857 bool default_ro,
add710ea
JR
2858 const char *subname,
2859 int area_type)
371a689f
AW
2860{
2861 char cap_str[10];
2862 struct mmc_blk_data *part_md;
2863
2864 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
add710ea 2865 subname, area_type);
371a689f
AW
2866 if (IS_ERR(part_md))
2867 return PTR_ERR(part_md);
2868 part_md->part_type = part_type;
2869 list_add(&part_md->part, &md->part);
2870
2871 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
2872 cap_str, sizeof(cap_str));
a3c76eb9 2873 pr_info("%s: %s %s partition %u %s\n",
371a689f
AW
2874 part_md->disk->disk_name, mmc_card_id(card),
2875 mmc_card_name(card), part_md->part_type, cap_str);
2876 return 0;
2877}
2878
e0c368d5
NJ
2879/* MMC Physical partitions consist of two boot partitions and
2880 * up to four general purpose partitions.
2881 * For each partition enabled in EXT_CSD a block device will be allocated
2882 * to provide access to the partition.
2883 */
2884
371a689f
AW
2885static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2886{
e0c368d5 2887 int idx, ret = 0;
371a689f
AW
2888
2889 if (!mmc_card_mmc(card))
2890 return 0;
2891
e0c368d5
NJ
2892 for (idx = 0; idx < card->nr_parts; idx++) {
2893 if (card->part[idx].size) {
2894 ret = mmc_blk_alloc_part(card, md,
2895 card->part[idx].part_cfg,
2896 card->part[idx].size >> 9,
2897 card->part[idx].force_ro,
add710ea
JR
2898 card->part[idx].name,
2899 card->part[idx].area_type);
e0c368d5
NJ
2900 if (ret)
2901 return ret;
2902 }
371a689f
AW
2903 }
2904
2905 return ret;
1da177e4
LT
2906}
2907
371a689f
AW
2908static void mmc_blk_remove_req(struct mmc_blk_data *md)
2909{
add710ea
JR
2910 struct mmc_card *card;
2911
371a689f 2912 if (md) {
add710ea 2913 card = md->queue.card;
371a689f
AW
2914 if (md->disk->flags & GENHD_FL_UP) {
2915 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
add710ea
JR
2916 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2917 card->ext_csd.boot_ro_lockable)
2918 device_remove_file(disk_to_dev(md->disk),
2919 &md->power_ro_lock);
371a689f
AW
2920
2921 /* Stop new requests from getting into the queue */
2922 del_gendisk(md->disk);
2923 }
2924
2925 /* Then flush out any already in there */
2926 mmc_cleanup_queue(&md->queue);
ce39f9d1
SJ
2927 if (md->flags & MMC_BLK_PACKED_CMD)
2928 mmc_packed_clean(&md->queue);
371a689f
AW
2929 mmc_blk_put(md);
2930 }
2931}
2932
2933static void mmc_blk_remove_parts(struct mmc_card *card,
2934 struct mmc_blk_data *md)
2935{
2936 struct list_head *pos, *q;
2937 struct mmc_blk_data *part_md;
2938
f06c9153 2939 __clear_bit(md->name_idx, name_use);
371a689f
AW
2940 list_for_each_safe(pos, q, &md->part) {
2941 part_md = list_entry(pos, struct mmc_blk_data, part);
2942 list_del(pos);
2943 mmc_blk_remove_req(part_md);
2944 }
2945}
2946
2947static int mmc_add_disk(struct mmc_blk_data *md)
2948{
2949 int ret;
add710ea 2950 struct mmc_card *card = md->queue.card;
371a689f
AW
2951
2952 add_disk(md->disk);
2953 md->force_ro.show = force_ro_show;
2954 md->force_ro.store = force_ro_store;
641c3187 2955 sysfs_attr_init(&md->force_ro.attr);
371a689f
AW
2956 md->force_ro.attr.name = "force_ro";
2957 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2958 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2959 if (ret)
add710ea
JR
2960 goto force_ro_fail;
2961
2962 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2963 card->ext_csd.boot_ro_lockable) {
88187398 2964 umode_t mode;
add710ea
JR
2965
2966 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2967 mode = S_IRUGO;
2968 else
2969 mode = S_IRUGO | S_IWUSR;
2970
2971 md->power_ro_lock.show = power_ro_lock_show;
2972 md->power_ro_lock.store = power_ro_lock_store;
00d9ac08 2973 sysfs_attr_init(&md->power_ro_lock.attr);
add710ea
JR
2974 md->power_ro_lock.attr.mode = mode;
2975 md->power_ro_lock.attr.name =
2976 "ro_lock_until_next_power_on";
2977 ret = device_create_file(disk_to_dev(md->disk),
2978 &md->power_ro_lock);
2979 if (ret)
2980 goto power_ro_lock_fail;
2981 }
2982 return ret;
2983
2984power_ro_lock_fail:
2985 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2986force_ro_fail:
2987 del_gendisk(md->disk);
371a689f
AW
2988
2989 return ret;
2990}
2991
c59d4473
CB
2992#define CID_MANFID_SANDISK 0x2
2993#define CID_MANFID_TOSHIBA 0x11
2994#define CID_MANFID_MICRON 0x13
3550ccdb 2995#define CID_MANFID_SAMSUNG 0x15
6fa3eb70
S
2996#define CID_MANFID_SANDISK_NEW 0x45
2997#define CID_MANFID_HYNIX 0x90
2998#define CID_MANFID_KSI 0x70
c59d4473 2999
6f60c222
AW
3000static const struct mmc_fixup blk_fixups[] =
3001{
c59d4473
CB
3002 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
3003 MMC_QUIRK_INAND_CMD38),
3004 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
3005 MMC_QUIRK_INAND_CMD38),
3006 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
3007 MMC_QUIRK_INAND_CMD38),
3008 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
3009 MMC_QUIRK_INAND_CMD38),
3010 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
3011 MMC_QUIRK_INAND_CMD38),
6fa3eb70
S
3012 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_NEW, CID_OEMID_ANY, add_quirk,
3013 MMC_QUIRK_PON),
d0c97cfb
AW
3014 /*
3015 * Some MMC cards experience performance degradation with CMD23
3016 * instead of CMD12-bounded multiblock transfers. For now we'll
3017 * black list what's bad...
3018 * - Certain Toshiba cards.
3019 *
3020 * N.B. This doesn't affect SD cards.
3021 */
c59d4473 3022 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
d0c97cfb 3023 MMC_QUIRK_BLK_NO_CMD23),
c59d4473 3024 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
d0c97cfb 3025 MMC_QUIRK_BLK_NO_CMD23),
c59d4473 3026 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
d0c97cfb 3027 MMC_QUIRK_BLK_NO_CMD23),
6de5fc9c
SNX
3028
3029 /*
e9e0c8ad 3030 * Some MMC cards need longer data read timeout than indicated in CSD.
6de5fc9c 3031 */
c59d4473 3032 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
6de5fc9c 3033 MMC_QUIRK_LONG_READ_TIME),
e9e0c8ad
MG
3034 MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3035 MMC_QUIRK_LONG_READ_TIME),
6de5fc9c 3036
3550ccdb
IC
3037 /*
3038 * On these Samsung MoviNAND parts, performing secure erase or
3039 * secure trim can result in unrecoverable corruption due to a
3040 * firmware bug.
3041 */
3042 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3043 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3044 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3045 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3046 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3047 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3048 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3049 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3050 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3051 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3052 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3053 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3054 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3055 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3056 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3057 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
6fa3eb70
S
3058#ifdef CONFIG_MTK_EMMC_CACHE
3059 /*
3060 * Some MMC cards with the cache feature cannot flush previously cached data by forced programming or
3061 * reliable write, which cannot guarantee strong ordering between metadata and file data.
3062 */
3063
3064 /*
3065 * On Toshiba eMMC, write performance drops after the cache feature is enabled because the flush operation wastes much time.
3066 */
3067 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3068 MMC_QUIRK_DISABLE_CACHE),
3069#endif
3070
3071 /* Hynix 4.41 trim will lead to boot-up failure. */
3072 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY, add_quirk_mmc,
3073 MMC_QUIRK_TRIM_UNSTABLE),
3074
3075 /* KSI PRV=0x3 trim will lead to a write performance drop. */
3076 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_KSI, CID_OEMID_ANY, add_quirk_mmc_ksi_v03_skip_trim,
3077 MMC_QUIRK_KSI_V03_SKIP_TRIM),
3550ccdb 3078
6f60c222
AW
3079 END_FIXUP
3080};
3081
6fa3eb70
S
3082#if defined(CONFIG_MTK_EMMC_SUPPORT) && !defined(CONFIG_MTK_GPT_SCHEME_SUPPORT)
3083 extern void emmc_create_sys_symlink (struct mmc_card *card);
3084#endif
1da177e4
LT
3085static int mmc_blk_probe(struct mmc_card *card)
3086{
371a689f 3087 struct mmc_blk_data *md, *part_md;
a7bbb573
PO
3088 char cap_str[10];
3089
912490db
PO
3090 /*
3091 * Check that the card supports the command class(es) we need.
3092 */
3093 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
1da177e4
LT
3094 return -ENODEV;
3095
1da177e4
LT
3096 md = mmc_blk_alloc(card);
3097 if (IS_ERR(md))
3098 return PTR_ERR(md);
3099
444122fd 3100 string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
a7bbb573 3101 cap_str, sizeof(cap_str));
a3c76eb9 3102 pr_info("%s: %s %s %s %s\n",
1da177e4 3103 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
a7bbb573 3104 cap_str, md->read_only ? "(ro)" : "");
1da177e4 3105
371a689f
AW
3106 if (mmc_blk_alloc_parts(card, md))
3107 goto out;
3108
1da177e4 3109 mmc_set_drvdata(card, md);
6f60c222
AW
3110 mmc_fixup_device(card, blk_fixups);
3111
6fa3eb70
S
3112 printk("[%s]: %s by manufacturer settings, quirks=0x%x\n", __func__, md->disk->disk_name, card->quirks);
3113
3114#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
3115 mmc_set_bus_resume_policy(card->host, 1);
3116#endif
371a689f
AW
3117 if (mmc_add_disk(md))
3118 goto out;
3119
3120 list_for_each_entry(part_md, &md->part, part) {
3121 if (mmc_add_disk(part_md))
3122 goto out;
3123 }
6fa3eb70
S
3124#if defined(CONFIG_MTK_EMMC_SUPPORT) && !defined(CONFIG_MTK_GPT_SCHEME_SUPPORT)
3125 emmc_create_sys_symlink(card);
3126#endif
1da177e4
LT
3127 return 0;
3128
3129 out:
371a689f
AW
3130 mmc_blk_remove_parts(card, md);
3131 mmc_blk_remove_req(md);
5865f287 3132 return 0;
1da177e4
LT
3133}
3134
3135static void mmc_blk_remove(struct mmc_card *card)
3136{
3137 struct mmc_blk_data *md = mmc_get_drvdata(card);
3138
371a689f 3139 mmc_blk_remove_parts(card, md);
ddd6fa7e
AH
3140 mmc_claim_host(card->host);
3141 mmc_blk_part_switch(card, md);
3142 mmc_release_host(card->host);
371a689f 3143 mmc_blk_remove_req(md);
1da177e4 3144 mmc_set_drvdata(card, NULL);
6fa3eb70
S
3145#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
3146 mmc_set_bus_resume_policy(card->host, 0);
3147#endif
1da177e4
LT
3148}
3149
3150#ifdef CONFIG_PM
32d317c6 3151static int mmc_blk_suspend(struct mmc_card *card)
1da177e4 3152{
371a689f 3153 struct mmc_blk_data *part_md;
1da177e4
LT
3154 struct mmc_blk_data *md = mmc_get_drvdata(card);
3155
3156 if (md) {
3157 mmc_queue_suspend(&md->queue);
371a689f
AW
3158 list_for_each_entry(part_md, &md->part, part) {
3159 mmc_queue_suspend(&part_md->queue);
3160 }
1da177e4
LT
3161 }
3162 return 0;
3163}
3164
3165static int mmc_blk_resume(struct mmc_card *card)
3166{
371a689f 3167 struct mmc_blk_data *part_md;
1da177e4
LT
3168 struct mmc_blk_data *md = mmc_get_drvdata(card);
3169
3170 if (md) {
371a689f
AW
3171 /*
3172 * Resume involves the card going into idle state,
3173 * so current partition is always the main one.
3174 */
3175 md->part_curr = md->part_type;
1da177e4 3176 mmc_queue_resume(&md->queue);
371a689f
AW
3177 list_for_each_entry(part_md, &md->part, part) {
3178 mmc_queue_resume(&part_md->queue);
3179 }
1da177e4
LT
3180 }
3181 return 0;
3182}
3183#else
3184#define mmc_blk_suspend NULL
3185#define mmc_blk_resume NULL
3186#endif
3187
3188static struct mmc_driver mmc_driver = {
3189 .drv = {
3190 .name = "mmcblk",
3191 },
3192 .probe = mmc_blk_probe,
3193 .remove = mmc_blk_remove,
3194 .suspend = mmc_blk_suspend,
3195 .resume = mmc_blk_resume,
3196};
3197
3198static int __init mmc_blk_init(void)
3199{
9d4e98e9 3200 int res;
1da177e4 3201
5e71b7a6
OJ
3202 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
3203 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
3204
3205 max_devices = 256 / perdev_minors;
3206
fe6b4c88
PO
3207 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
3208 if (res)
1da177e4 3209 goto out;
1da177e4 3210
9d4e98e9
AM
3211 res = mmc_register_driver(&mmc_driver);
3212 if (res)
3213 goto out2;
1da177e4 3214
9d4e98e9
AM
3215 return 0;
3216 out2:
3217 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1da177e4
LT
3218 out:
3219 return res;
3220}
3221
3222static void __exit mmc_blk_exit(void)
3223{
3224 mmc_unregister_driver(&mmc_driver);
fe6b4c88 3225 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1da177e4
LT
3226}
3227
3228module_init(mmc_blk_init);
3229module_exit(mmc_blk_exit);
3230
3231MODULE_LICENSE("GPL");
3232MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
3233