drivers/mmc/card/block.c
1 /*
2 * Block driver for media (i.e., flash cards)
3 *
4 * Copyright 2002 Hewlett-Packard Company
5 * Copyright 2005-2008 Pierre Ossman
6 *
7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is
9 * preserved in its entirety in all copies and derived works.
10 *
11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13 * FITNESS FOR ANY PARTICULAR PURPOSE.
14 *
15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
16 *
17 * Author: Andrew Christian
18 * 28 May 2002
19 */
20 #include <linux/moduleparam.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23
24 #include <linux/kernel.h>
25 #include <linux/fs.h>
26 #include <linux/slab.h>
27 #include <linux/errno.h>
28 #include <linux/hdreg.h>
29 #include <linux/kdev_t.h>
30 #include <linux/blkdev.h>
31 #include <linux/mutex.h>
32 #include <linux/scatterlist.h>
33 #include <linux/string_helpers.h>
34 #include <linux/delay.h>
35 #include <linux/capability.h>
36 #include <linux/compat.h>
37
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/mmc.h>
40
41 #include <linux/mmc/ioctl.h>
42 #include <linux/mmc/card.h>
43 #include <linux/mmc/host.h>
44 #include <linux/mmc/mmc.h>
45 #include <linux/mmc/sd.h>
46
47 #include <asm/uaccess.h>
48
49 #include "queue.h"
50 #include <mach/mtk_meminfo.h>
51
52 // add vmstat info to the block tag log
53 #include <linux/vmstat.h>
54 #define FEATURE_STORAGE_VMSTAT_LOGGER
55
56
57 #include <linux/xlog.h>
58 #include <asm/div64.h>
59 #include <linux/vmalloc.h>
60
61 #include <linux/mmc/sd_misc.h>
62
63 #define MET_USER_EVENT_SUPPORT
64 #include <linux/met_drv.h>
65
66 #define FEATURE_STORAGE_PERF_INDEX
67 // keep storage perf logging enabled even in user (production) builds
68 #if 0
69 #ifdef USER_BUILD_KERNEL
70 #undef FEATURE_STORAGE_PERF_INDEX
71 #endif
72 #endif
73
74 MODULE_ALIAS("mmc:block");
75 #ifdef MODULE_PARAM_PREFIX
76 #undef MODULE_PARAM_PREFIX
77 #endif
78 #define MODULE_PARAM_PREFIX "mmcblk."
79
80 #define INAND_CMD38_ARG_EXT_CSD 113
81 #define INAND_CMD38_ARG_ERASE 0x00
82 #define INAND_CMD38_ARG_TRIM 0x01
83 #define INAND_CMD38_ARG_SECERASE 0x80
84 #define INAND_CMD38_ARG_SECTRIM1 0x81
85 #define INAND_CMD38_ARG_SECTRIM2 0x88
86 #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
87
88 #define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
89 (rq_data_dir(req) == WRITE))
90 #define PACKED_CMD_VER 0x01
91 #define PACKED_CMD_WR 0x02
92
93 static DEFINE_MUTEX(block_mutex);
94
95 /*
96 * The defaults come from config options but can be overridden by module
97 * or bootarg options.
98 */
99 static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
100
101 /*
102 * We've only got one major, so number of mmcblk devices is
103 * limited to 256 / number of minors per device.
104 */
105 static int max_devices;
106
107 /* 256 minors, so at most 256 separate devices */
108 static DECLARE_BITMAP(dev_use, 256);
109 static DECLARE_BITMAP(name_use, 256);
110
111 /*
112 * There is one mmc_blk_data per slot.
113 */
114 struct mmc_blk_data {
115 spinlock_t lock;
116 struct gendisk *disk;
117 struct mmc_queue queue;
118 struct list_head part;
119
120 unsigned int flags;
121 #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
122 #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
123 #define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
124
125 unsigned int usage;
126 unsigned int read_only;
127 unsigned int part_type;
128 unsigned int name_idx;
129 unsigned int reset_done;
130 #define MMC_BLK_READ BIT(0)
131 #define MMC_BLK_WRITE BIT(1)
132 #define MMC_BLK_DISCARD BIT(2)
133 #define MMC_BLK_SECDISCARD BIT(3)
134
135 /*
136 * Only set in main mmc_blk_data associated
137 * with mmc_card with mmc_set_drvdata, and keeps
138 * track of the currently selected device partition.
139 */
140 unsigned int part_curr;
141 struct device_attribute force_ro;
142 struct device_attribute power_ro_lock;
143 int area_type;
144 };
145
146 static DEFINE_MUTEX(open_lock);
147
148 enum {
149 MMC_PACKED_NR_IDX = -1,
150 MMC_PACKED_NR_ZERO,
151 MMC_PACKED_NR_SINGLE,
152 };
153
154 module_param(perdev_minors, int, 0444);
155 MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
156
157 static inline int mmc_blk_part_switch(struct mmc_card *card,
158 struct mmc_blk_data *md);
159 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
160
161 #ifndef CONFIG_MTK_FPGA
162 #include <linux/met_ftrace_bio.h>
163 #endif
164
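/*
 * Map an MMC opcode to a single character for the MET block-trace
 * output below: 'R' for reads, 'W' for writes, and 'X' for anything
 * the trace hooks should ignore.
 */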
165 char mmc_get_rw_type(u32 opcode)
166 {
167 switch (opcode)
168 {
169 case MMC_READ_SINGLE_BLOCK:
170 case MMC_READ_MULTIPLE_BLOCK:
171 return 'R';
172 case MMC_WRITE_BLOCK:
173 case MMC_WRITE_MULTIPLE_BLOCK:
174 return 'W';
175 default:
176 // Unknown opcode!!!
177 return 'X';
178 }
179 }
180
181 inline int check_met_mmc_async_req_legal(struct mmc_host *host, struct mmc_async_req *areq)
182 {
183 int is_legal = 0;
184
185 if (!((host == NULL) || (areq == NULL) || (areq->mrq == NULL)
186 || (areq->mrq->cmd == NULL) || (areq->mrq->data == NULL)
187 || (host->card == NULL))) {
188 is_legal = 1;
189 }
190
191 return is_legal;
192 }
193
194 inline int check_met_mmc_blk_data_legal(struct mmc_blk_data *md)
195 {
196 int is_legal = 0;
197
198 if (!((md == NULL) || (md->disk == NULL))) {
199 is_legal = 1;
200 }
201
202 return is_legal;
203 }
204
205 inline int check_met_mmc_req_legal(struct mmc_host *host, struct mmc_request *req)
206 {
207 int is_legal = 0;
208
209 if (!((host == NULL) || (req == NULL) || (req->cmd == NULL)
210 || (req->data == NULL) || (host->card == NULL))) {
211 is_legal = 1;
212 }
213
214 return is_legal;
215 }
216
217 void met_mmc_insert(struct mmc_host *host, struct mmc_async_req *areq)
218 {
219 struct mmc_blk_data *md;
220 char type;
221
222 if (!check_met_mmc_async_req_legal(host, areq))
223 return;
224
225 md = mmc_get_drvdata(host->card);
226 if (!check_met_mmc_blk_data_legal(md))
227 return;
228
229 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
230 if (type == 'X')
231 return;
232
233 #ifndef CONFIG_MTK_FPGA
234 MET_FTRACE_PRINTK(met_mmc_insert, md, areq, type);
235 #endif
236 }
237
238 void met_mmc_dma_map(struct mmc_host *host, struct mmc_async_req *areq)
239 {
240 struct mmc_blk_data *md;
241 char type;
242
243 if (!check_met_mmc_async_req_legal(host, areq))
244 return;
245
246 md = mmc_get_drvdata(host->card);
247 if (!check_met_mmc_blk_data_legal(md))
248 return;
249
250 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
251 if (type == 'X')
252 return;
253 #ifndef CONFIG_MTK_FPGA
254 MET_FTRACE_PRINTK(met_mmc_dma_map, md, areq, type);
255 #endif
256 }
257
258 //void met_mmc_issue(struct mmc_host *host, struct mmc_async_req *areq)
259 //{
260 // struct mmc_blk_data *md;
261 // char type;
262 //
263 // if (!check_met_mmc_async_req_legal(host, areq))
264 // return;
265 //
266 // md = mmc_get_drvdata(host->card);
267 //
268 // type = mmc_get_rw_type(areq->mrq->cmd->opcode);
269 // if (type == 'X')
270 // return;
271 //
272 // MET_FTRACE_PRINTK(met_mmc_issue, md, areq, type);
273 //}
274
275 void met_mmc_issue(struct mmc_host *host, struct mmc_request *req)
276 {
277 struct mmc_blk_data *md;
278 char type;
279
280 if (!check_met_mmc_req_legal(host, req))
281 return;
282
283 md = mmc_get_drvdata(host->card);
284 if (!check_met_mmc_blk_data_legal(md))
285 return;
286
287 type = mmc_get_rw_type(req->cmd->opcode);
288 if (type == 'X')
289 return;
290 #ifndef CONFIG_MTK_FPGA
291 MET_FTRACE_PRINTK(met_mmc_issue, md, req, type);
292 #endif
293 }
294
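/*
 * Note: unlike the hooks above, the tracing helpers below do not go
 * through the check_met_mmc_*_legal() guards and dereference
 * host->card and cmd->data directly, so they are presumably only
 * called for data commands on a valid card.
 */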
295 void met_mmc_send_cmd(struct mmc_host *host, struct mmc_command *cmd)
296 {
297 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
298 char type;
299
300 type = mmc_get_rw_type(cmd->opcode);
301 if (type == 'X')
302 return;
303
304 trace_printk("%d,%d %c %d + %d [%s]\n",
305 md->disk->major, md->disk->first_minor, type,
306 cmd->arg, cmd->data->blocks,
307 current->comm);
308 }
309
310 void met_mmc_xfr_done(struct mmc_host *host, struct mmc_command *cmd)
311 {
312 struct mmc_blk_data *md=mmc_get_drvdata(host->card);
313 char type;
314
315 type = mmc_get_rw_type(cmd->opcode);
316 if (type == 'X')
317 return;
318
319 trace_printk("%d,%d %c %d + %d [%s]\n",
320 md->disk->major, md->disk->first_minor, type,
321 cmd->arg, cmd->data->blocks,
322 current->comm);
323 }
324
325 void met_mmc_wait_xfr(struct mmc_host *host, struct mmc_async_req *areq)
326 {
327 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
328 char type;
329
330 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
331 if (type == 'X')
332 return;
333
334 trace_printk("%d,%d %c %d + %d [%s]\n",
335 md->disk->major, md->disk->first_minor, type,
336 areq->mrq->cmd->arg, areq->mrq->data->blocks,
337 current->comm);
338
339 }
340
341 void met_mmc_tuning_start(struct mmc_host *host, struct mmc_command *cmd)
342 {
343 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
344 char type;
345
346 type = mmc_get_rw_type(cmd->opcode);
347 if (type == 'X')
348 return;
349
350 trace_printk("%d,%d %c %d + %d [%s]\n",
351 md->disk->major, md->disk->first_minor, type,
352 cmd->arg, cmd->data->blocks,
353 current->comm);
354 }
355
356 void met_mmc_tuning_end(struct mmc_host *host, struct mmc_command *cmd)
357 {
358 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
359 char type;
360
361 type = mmc_get_rw_type(cmd->opcode);
362 if (type == 'X')
363 return;
364
365 trace_printk("%d,%d %c %d + %d [%s]\n",
366 md->disk->major, md->disk->first_minor, type,
367 cmd->arg, cmd->data->blocks,
368 current->comm);
369 }
370
371 void met_mmc_complete(struct mmc_host *host, struct mmc_async_req *areq)
372 {
373 struct mmc_blk_data *md;
374 char type;
375
376 if (!check_met_mmc_async_req_legal(host, areq))
377 return;
378
379 md = mmc_get_drvdata(host->card);
380 if (!check_met_mmc_blk_data_legal(md))
381 return;
382
383 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
384 if (type == 'X')
385 return;
386 #ifndef CONFIG_MTK_FPGA
387 MET_FTRACE_PRINTK(met_mmc_complete, md, areq, type);
388 #endif
389 }
390
391 void met_mmc_dma_unmap_start(struct mmc_host *host, struct mmc_async_req *areq)
392 {
393 struct mmc_blk_data *md;
394 char type;
395
396 if (!check_met_mmc_async_req_legal(host, areq))
397 return;
398
399 md = mmc_get_drvdata(host->card);
400 if (!check_met_mmc_blk_data_legal(md))
401 return;
402
403 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
404 if (type == 'X')
405 return;
406 #ifndef CONFIG_MTK_FPGA
407 MET_FTRACE_PRINTK(met_mmc_dma_unmap_start, md, areq, type);
408 #endif
409 }
410
411 void met_mmc_dma_unmap_stop(struct mmc_host *host, struct mmc_async_req *areq)
412 {
413 struct mmc_blk_data *md;
414 char type;
415
416 if (!check_met_mmc_async_req_legal(host, areq))
417 return;
418
419 md = mmc_get_drvdata(host->card);
420 if (!check_met_mmc_blk_data_legal(md))
421 return;
422
423 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
424 if (type == 'X')
425 return;
426 #ifndef CONFIG_MTK_FPGA
427 MET_FTRACE_PRINTK(met_mmc_dma_unmap_stop, md, areq, type);
428 #endif
429 }
430
431 void met_mmc_continue_req_end(struct mmc_host *host, struct mmc_async_req *areq)
432 {
433 struct mmc_blk_data *md;
434 char type;
435
436 if (!check_met_mmc_async_req_legal(host, areq))
437 return;
438
439 md = mmc_get_drvdata(host->card);
440 if (!check_met_mmc_blk_data_legal(md))
441 return;
442
443 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
444 if (type == 'X')
445 return;
446 #ifndef CONFIG_MTK_FPGA
447 MET_FTRACE_PRINTK(met_mmc_continue_req_end, md, areq, type);
448 #endif
449 }
450
451 void met_mmc_dma_stop(struct mmc_host *host, struct mmc_async_req *areq, unsigned int bd_num)
452 {
453 struct mmc_blk_data *md;
454 char type;
455
456 if (!check_met_mmc_async_req_legal(host, areq))
457 return;
458
459 md = mmc_get_drvdata(host->card);
460 if (!check_met_mmc_blk_data_legal(md))
461 return;
462
463 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
464 if (type == 'X')
465 return;
466 #ifndef CONFIG_MTK_FPGA
467 MET_FTRACE_PRINTK(met_mmc_dma_stop, md, areq, type, bd_num);
468 #endif
469 }
470
471 //void met_mmc_end(struct mmc_host *host, struct mmc_async_req *areq)
472 //{
473 // struct mmc_blk_data *md;
474 // char type;
475 //
476 // if (areq && areq->mrq && host && host->card) {
477 // type = mmc_get_rw_type(areq->mrq->cmd->opcode);
478 // if (type == 'X')
479 // return;
480 //
481 // md = mmc_get_drvdata(host->card);
482 //
483 // if (areq && areq->mrq)
484 // {
485 // trace_printk("%d,%d %c %d + %d [%s]\n",
486 // md->disk->major, md->disk->first_minor, type,
487 // areq->mrq->cmd->arg, areq->mrq->data->blocks,
488 // current->comm);
489 // }
490 // }
491 //}
492
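/*
 * Reset the packed-command bookkeeping on a queue request, typically
 * after a packed write has completed, been aborted, or been reverted.
 */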
493 static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
494 {
495 struct mmc_packed *packed = mqrq->packed;
496
497 BUG_ON(!packed);
498
499 mqrq->cmd_type = MMC_PACKED_NONE;
500 packed->nr_entries = MMC_PACKED_NR_ZERO;
501 packed->idx_failure = MMC_PACKED_NR_IDX;
502 packed->retries = 0;
503 packed->blocks = 0;
504 }
505
506 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
507 {
508 struct mmc_blk_data *md;
509
510 mutex_lock(&open_lock);
511 md = disk->private_data;
512 if (md && md->usage == 0)
513 md = NULL;
514 if (md)
515 md->usage++;
516 mutex_unlock(&open_lock);
517
518 return md;
519 }
520
521 static inline int mmc_get_devidx(struct gendisk *disk)
522 {
523 int devidx = disk->first_minor / perdev_minors;
524 return devidx;
525 }
526
527 static void mmc_blk_put(struct mmc_blk_data *md)
528 {
529 mutex_lock(&open_lock);
530 md->usage--;
531 if (md->usage == 0) {
532 int devidx = mmc_get_devidx(md->disk);
533 blk_cleanup_queue(md->queue.queue);
534
535 __clear_bit(devidx, dev_use);
536
537 put_disk(md->disk);
538 kfree(md);
539 }
540 mutex_unlock(&open_lock);
541 }
542
543 static ssize_t power_ro_lock_show(struct device *dev,
544 struct device_attribute *attr, char *buf)
545 {
546 int ret;
547 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
548 struct mmc_card *card = md->queue.card;
549 int locked = 0;
550
551 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
552 locked = 2;
553 else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
554 locked = 1;
555
556 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
557
558 mmc_blk_put(md);
559
560 return ret;
561 }
562
563 static ssize_t power_ro_lock_store(struct device *dev,
564 struct device_attribute *attr, const char *buf, size_t count)
565 {
566 int ret;
567 struct mmc_blk_data *md, *part_md;
568 struct mmc_card *card;
569 unsigned long set;
570
571 if (kstrtoul(buf, 0, &set))
572 return -EINVAL;
573
574 if (set != 1)
575 return count;
576
577 md = mmc_blk_get(dev_to_disk(dev));
578 card = md->queue.card;
579
580 mmc_claim_host(card->host);
581
582 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
583 card->ext_csd.boot_ro_lock |
584 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
585 card->ext_csd.part_time);
586 if (ret)
587 pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
588 else
589 card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
590
591 mmc_release_host(card->host);
592
593 if (!ret) {
594 pr_info("%s: Locking boot partition ro until next power on\n",
595 md->disk->disk_name);
596 set_disk_ro(md->disk, 1);
597
598 list_for_each_entry(part_md, &md->part, part)
599 if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
600 pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
601 set_disk_ro(part_md->disk, 1);
602 }
603 }
604
605 mmc_blk_put(md);
606 return count;
607 }
608
609 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
610 char *buf)
611 {
612 int ret;
613 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
614
615 ret = snprintf(buf, PAGE_SIZE, "%d\n",
616 get_disk_ro(dev_to_disk(dev)) ^
617 md->read_only);
618 mmc_blk_put(md);
619 return ret;
620 }
621
622 static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
623 const char *buf, size_t count)
624 {
625 int ret;
626 char *end;
627 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
628 unsigned long set = simple_strtoul(buf, &end, 0);
629 if (end == buf) {
630 ret = -EINVAL;
631 goto out;
632 }
633
634 set_disk_ro(dev_to_disk(dev), set || md->read_only);
635 ret = count;
636 out:
637 mmc_blk_put(md);
638 return ret;
639 }
640
641 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
642 {
643 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
644 int ret = -ENXIO;
645
646 mutex_lock(&block_mutex);
647 if (md) {
648 if (md->usage == 2)
649 check_disk_change(bdev);
650 ret = 0;
651
652 if ((mode & FMODE_WRITE) && md->read_only) {
653 mmc_blk_put(md);
654 ret = -EROFS;
655 }
656 }
657 mutex_unlock(&block_mutex);
658
659 return ret;
660 }
661
662 static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
663 {
664 struct mmc_blk_data *md = disk->private_data;
665
666 mutex_lock(&block_mutex);
667 mmc_blk_put(md);
668 mutex_unlock(&block_mutex);
669 }
670
671 static int
672 mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
673 {
674 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
675 geo->heads = 4;
676 geo->sectors = 16;
677 return 0;
678 }
679
680 struct mmc_blk_ioc_data {
681 struct mmc_ioc_cmd ic;
682 unsigned char *buf;
683 u64 buf_bytes;
684 };
685
686 static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
687 struct mmc_ioc_cmd __user *user)
688 {
689 struct mmc_blk_ioc_data *idata;
690 int err;
691
692 idata = kzalloc(sizeof(*idata), GFP_KERNEL);
693 if (!idata) {
694 err = -ENOMEM;
695 goto out;
696 }
697
698 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
699 err = -EFAULT;
700 goto idata_err;
701 }
702
703 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
704 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
705 err = -EOVERFLOW;
706 goto idata_err;
707 }
708
709 if (!idata->buf_bytes)
710 return idata;
711
712 idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
713 if (!idata->buf) {
714 err = -ENOMEM;
715 goto idata_err;
716 }
717
718 if (copy_from_user(idata->buf, (void __user *)(unsigned long)
719 idata->ic.data_ptr, idata->buf_bytes)) {
720 err = -EFAULT;
721 goto copy_err;
722 }
723
724 return idata;
725
726 copy_err:
727 kfree(idata->buf);
728 idata_err:
729 kfree(idata);
730 out:
731 return ERR_PTR(err);
732 }
733
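/*
 * Poll CMD13 (SEND_STATUS) until the card reports that the previous
 * RPMB programming operation has finished, giving up after
 * retries_max attempts.
 */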
734 static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
735 u32 retries_max)
736 {
737 int err;
738 u32 retry_count = 0;
739
740 if (!status || !retries_max)
741 return -EINVAL;
742
743 do {
744 err = get_card_status(card, status, 5);
745 if (err)
746 break;
747
748 if (!R1_STATUS(*status) &&
749 (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
750 break; /* RPMB programming operation complete */
751
752 /*
753 * Reschedule to give the MMC device a chance to continue
754 * processing the previous command without being polled too
755 * frequently.
756 */
757 usleep_range(1000, 5000);
758 } while (++retry_count < retries_max);
759
760 if (retry_count == retries_max)
761 err = -EPERM;
762
763 return err;
764 }
765
766 static int mmc_blk_ioctl_cmd(struct block_device *bdev,
767 struct mmc_ioc_cmd __user *ic_ptr)
768 {
769 struct mmc_blk_ioc_data *idata;
770 struct mmc_blk_data *md;
771 struct mmc_card *card;
772 struct mmc_command cmd = {0};
773 struct mmc_data data = {0};
774 struct mmc_request mrq = {NULL};
775 struct scatterlist sg;
776 int err;
777 int is_rpmb = false;
778 u32 status = 0;
779
780 /*
781 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
782 * whole block device, not on a partition. This prevents overspray
783 * between sibling partitions.
784 */
785 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
786 return -EPERM;
787
788 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
789 if (IS_ERR(idata))
790 return PTR_ERR(idata);
791
792 md = mmc_blk_get(bdev->bd_disk);
793 if (!md) {
794 err = -EINVAL;
795 goto cmd_err;
796 }
797
798 if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
799 is_rpmb = true;
800
801 card = md->queue.card;
802 if (IS_ERR(card)) {
803 err = PTR_ERR(card);
804 goto cmd_done;
805 }
806
807 cmd.opcode = idata->ic.opcode;
808 cmd.arg = idata->ic.arg;
809 cmd.flags = idata->ic.flags;
810
811 if (idata->buf_bytes) {
812 data.sg = &sg;
813 data.sg_len = 1;
814 data.blksz = idata->ic.blksz;
815 data.blocks = idata->ic.blocks;
816
817 sg_init_one(data.sg, idata->buf, idata->buf_bytes);
818
819 if (idata->ic.write_flag)
820 data.flags = MMC_DATA_WRITE;
821 else
822 data.flags = MMC_DATA_READ;
823
824 /* data.flags must already be set before doing this. */
825 mmc_set_data_timeout(&data, card);
826
827 /* Allow overriding the timeout_ns for empirical tuning. */
828 if (idata->ic.data_timeout_ns)
829 data.timeout_ns = idata->ic.data_timeout_ns;
830
831 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
832 /*
833 * Pretend this is a data transfer and rely on the
834 * host driver to compute timeout. When all host
835 * drivers support cmd.cmd_timeout for R1B, this
836 * can be changed to:
837 *
838 * mrq.data = NULL;
839 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
840 */
841 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
842 }
843
844 mrq.data = &data;
845 }
846
847 mrq.cmd = &cmd;
848
849 mmc_claim_host(card->host);
850
851 err = mmc_blk_part_switch(card, md);
852 if (err)
853 goto cmd_rel_host;
854
855 if (idata->ic.is_acmd) {
856 err = mmc_app_cmd(card->host, card);
857 if (err)
858 goto cmd_rel_host;
859 }
860
861 if (is_rpmb) {
862 err = mmc_set_blockcount(card, data.blocks,
863 idata->ic.write_flag & (1 << 31));
864 if (err)
865 goto cmd_rel_host;
866 }
867
868 mmc_wait_for_req(card->host, &mrq);
869
870 if (cmd.error) {
871 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
872 __func__, cmd.error);
873 err = cmd.error;
874 goto cmd_rel_host;
875 }
876 if (data.error) {
877 dev_err(mmc_dev(card->host), "%s: data error %d\n",
878 __func__, data.error);
879 err = data.error;
880 goto cmd_rel_host;
881 }
882
883 /*
884 * According to the SD specs, some commands require a delay after
885 * issuing the command.
886 */
887 if (idata->ic.postsleep_min_us)
888 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
889
890 if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
891 err = -EFAULT;
892 goto cmd_rel_host;
893 }
894
895 if (!idata->ic.write_flag) {
896 if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
897 idata->buf, idata->buf_bytes)) {
898 err = -EFAULT;
899 goto cmd_rel_host;
900 }
901 }
902
903 if (is_rpmb) {
904 /*
905 * Ensure RPMB command has completed by polling CMD13
906 * "Send Status".
907 */
908 err = ioctl_rpmb_card_status_poll(card, &status, 5);
909 if (err)
910 dev_err(mmc_dev(card->host),
911 "%s: Card Status=0x%08X, error %d\n",
912 __func__, status, err);
913 }
914
915 cmd_rel_host:
916 mmc_release_host(card->host);
917
918 cmd_done:
919 mmc_blk_put(md);
920 cmd_err:
921 kfree(idata->buf);
922 kfree(idata);
923 return err;
924 }
925
926 static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
927 unsigned int cmd, unsigned long arg)
928 {
929 int ret = -EINVAL;
930 if (cmd == MMC_IOC_CMD)
931 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
932 return ret;
933 }
934
935 #ifdef CONFIG_COMPAT
936 static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
937 unsigned int cmd, unsigned long arg)
938 {
939 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
940 }
941 #endif
942
943 static const struct block_device_operations mmc_bdops = {
944 .open = mmc_blk_open,
945 .release = mmc_blk_release,
946 .getgeo = mmc_blk_getgeo,
947 .owner = THIS_MODULE,
948 .ioctl = mmc_blk_ioctl,
949 #ifdef CONFIG_COMPAT
950 .compat_ioctl = mmc_blk_compat_ioctl,
951 #endif
952 };
953
954 static inline int mmc_blk_part_switch(struct mmc_card *card,
955 struct mmc_blk_data *md)
956 {
957 int ret;
958 struct mmc_blk_data *main_md = mmc_get_drvdata(card);
959
960 if (main_md->part_curr == md->part_type)
961 return 0;
962
963 if (mmc_card_mmc(card)) {
964 u8 part_config = card->ext_csd.part_config;
965
966 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
967 part_config |= md->part_type;
968
969 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
970 EXT_CSD_PART_CONFIG, part_config,
971 card->ext_csd.part_time);
972 if (ret)
973 return ret;
974
975 card->ext_csd.part_config = part_config;
976 }
977
978 main_md->part_curr = md->part_type;
979 return 0;
980 }
981
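/*
 * Ask an SD card how many blocks of the last write completed, using
 * SEND_NUM_WR_BLKS (ACMD22). Returns (u32)-1 on any failure.
 */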
982 static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
983 {
984 int err;
985 u32 result;
986 __be32 *blocks;
987
988 struct mmc_request mrq = {NULL};
989 struct mmc_command cmd = {0};
990 struct mmc_data data = {0};
991
992 struct scatterlist sg;
993
994 cmd.opcode = MMC_APP_CMD;
995 cmd.arg = card->rca << 16;
996 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
997
998 err = mmc_wait_for_cmd(card->host, &cmd, 0);
999 if (err)
1000 return (u32)-1;
1001 if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
1002 return (u32)-1;
1003
1004 memset(&cmd, 0, sizeof(struct mmc_command));
1005
1006 cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
1007 cmd.arg = 0;
1008 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1009
1010 data.blksz = 4;
1011 data.blocks = 1;
1012 data.flags = MMC_DATA_READ;
1013 data.sg = &sg;
1014 data.sg_len = 1;
1015 mmc_set_data_timeout(&data, card);
1016
1017 mrq.cmd = &cmd;
1018 mrq.data = &data;
1019
1020 blocks = kmalloc(4, GFP_KERNEL);
1021 if (!blocks)
1022 return (u32)-1;
1023
1024 sg_init_one(&sg, blocks, 4);
1025
1026 mmc_wait_for_req(card->host, &mrq);
1027
1028 result = ntohl(*blocks);
1029 kfree(blocks);
1030
1031 if (cmd.error || data.error)
1032 result = (u32)-1;
1033
1034 return result;
1035 }
1036
1037 u32 __mmc_sd_num_wr_blocks(struct mmc_card *card)
1038 {
1039 return mmc_sd_num_wr_blocks(card);
1040 }
1041 EXPORT_SYMBOL(__mmc_sd_num_wr_blocks);
1042
1043 static int send_stop(struct mmc_card *card, u32 *status)
1044 {
1045 struct mmc_command cmd = {0};
1046 int err;
1047
1048 cmd.opcode = MMC_STOP_TRANSMISSION;
1049 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1050 err = mmc_wait_for_cmd(card->host, &cmd, 5);
1051 if (err == 0)
1052 *status = cmd.resp[0];
1053 return err;
1054 }
1055
1056 static int get_card_status(struct mmc_card *card, u32 *status, int retries)
1057 {
1058 struct mmc_command cmd = {0};
1059 int err;
1060
1061 cmd.opcode = MMC_SEND_STATUS;
1062 if (!mmc_host_is_spi(card->host))
1063 cmd.arg = card->rca << 16;
1064 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
1065 err = mmc_wait_for_cmd(card->host, &cmd, retries);
1066 if (err == 0)
1067 *status = cmd.resp[0];
1068 return err;
1069 }
1070
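/*
 * Return codes for the command error handling below: CONTINUE means
 * the data transfer result should still be examined, RETRY means the
 * r/w command should be reissued, ABORT gives up on the request, and
 * NOMEDIUM means the card is gone.
 */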
1071 #define ERR_NOMEDIUM 3
1072 #define ERR_RETRY 2
1073 #define ERR_ABORT 1
1074 #define ERR_CONTINUE 0
1075
1076 static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
1077 bool status_valid, u32 status)
1078 {
1079 switch (error) {
1080 case -EILSEQ:
1081 /* response crc error, retry the r/w cmd */
1082 pr_err("%s: %s sending %s command, card status %#x\n",
1083 req->rq_disk->disk_name, "response CRC error",
1084 name, status);
1085 return ERR_RETRY;
1086
1087 case -ETIMEDOUT:
1088 pr_err("%s: %s sending %s command, card status %#x\n",
1089 req->rq_disk->disk_name, "timed out", name, status);
1090
1091 /* If the status cmd initially failed, retry the r/w cmd */
1092 if (!status_valid) {
1093 pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
1094 return ERR_RETRY;
1095 }
1096 /*
1097 * If it was an r/w cmd CRC error, or an illegal command
1098 * (eg, issued in wrong state) then retry - we should
1099 * have corrected the state problem above.
1100 */
1101 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
1102 pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
1103 return ERR_RETRY;
1104 }
1105
1106 /* Otherwise abort the command */
1107 pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
1108 return ERR_ABORT;
1109
1110 default:
1111 /* We don't understand the error code the driver gave us */
1112 pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
1113 req->rq_disk->disk_name, error, status);
1114 return ERR_ABORT;
1115 }
1116 }
1117
1118 /*
1119 * Initial r/w and stop cmd error recovery.
1120 * We don't know whether the card received the r/w cmd or not, so try to
1121 * restore things back to a sane state. Essentially, we do this as follows:
1122 * - Obtain card status. If the first attempt to obtain card status fails,
1123 * the status word will reflect the failed status cmd, not the failed
1124 * r/w cmd. If we fail to obtain card status, it suggests we can no
1125 * longer communicate with the card.
1126 * - Check the card state. If the card received the cmd but there was a
1127 * transient problem with the response, it might still be in a data transfer
1128 * mode. Try to send it a stop command. If this fails, we can't recover.
1129 * - If the r/w cmd failed due to a response CRC error, it was probably
1130 * transient, so retry the cmd.
1131 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
1132 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
1133 * illegal cmd, retry.
1134 * Otherwise we don't understand what happened, so abort.
1135 */
1136 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
1137 struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
1138 {
1139 bool prev_cmd_status_valid = true;
1140 u32 status, stop_status = 0;
1141 int err, retry;
1142
1143 if (mmc_card_removed(card))
1144 return ERR_NOMEDIUM;
1145
1146 /*
1147 * Try to get card status which indicates both the card state
1148 * and why there was no response. If the first attempt fails,
1149 * we can't be sure the returned status is for the r/w command.
1150 */
1151 for (retry = 2; retry >= 0; retry--) {
1152 err = get_card_status(card, &status, 0);
1153 if (!err)
1154 break;
1155
1156 prev_cmd_status_valid = false;
1157 pr_err("%s: error %d sending status command, %sing\n",
1158 req->rq_disk->disk_name, err, retry ? "retry" : "abort");
1159 }
1160
1161 /* We couldn't get a response from the card. Give up. */
1162 if (err) {
1163 /* Check if the card is removed */
1164 if (mmc_detect_card_removed(card->host))
1165 return ERR_NOMEDIUM;
1166 return ERR_ABORT;
1167 }
1168
1169 /* Flag ECC errors */
1170 if ((status & R1_CARD_ECC_FAILED) ||
1171 (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
1172 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
1173 *ecc_err = 1;
1174
1175 /* Flag General errors */
1176 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1177 if ((status & R1_ERROR) ||
1178 (brq->stop.resp[0] & R1_ERROR)) {
1179 pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
1180 req->rq_disk->disk_name, __func__,
1181 brq->stop.resp[0], status);
1182 *gen_err = 1;
1183 }
1184
1185 /*
1186 * Check the current card state. If it is in some data transfer
1187 * mode, tell it to stop (and hopefully transition back to TRAN.)
1188 */
1189 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
1190 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
1191 err = send_stop(card, &stop_status);
1192 if (err) {
1193 get_card_status(card, &status, 0);
1194 if ((R1_CURRENT_STATE(status) == R1_STATE_TRAN) ||
1195 (R1_CURRENT_STATE(status) == R1_STATE_PRG)) {
1196 err = 0;
1197 stop_status = 0;
1198 pr_err("b card status %d \n", status);
1199 } else {
1200 pr_err("g card status %d \n", status);
1201 }
1202 }
1203 if (err)
1204 pr_err("%s: error %d sending stop command\n",
1205 req->rq_disk->disk_name, err);
1206
1207 /*
1208 * If the stop cmd also timed out, the card is probably
1209 * not present, so abort. Other errors are bad news too.
1210 */
1211 if (err)
1212 return ERR_ABORT;
1213 if (stop_status & R1_CARD_ECC_FAILED)
1214 *ecc_err = 1;
1215 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1216 if (stop_status & R1_ERROR) {
1217 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1218 req->rq_disk->disk_name, __func__,
1219 stop_status);
1220 *gen_err = 1;
1221 }
1222 }
1223
1224 /* Check for set block count errors */
1225 if (brq->sbc.error)
1226 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
1227 prev_cmd_status_valid, status);
1228
1229 /* Check for r/w command errors */
1230 if (brq->cmd.error)
1231 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
1232 prev_cmd_status_valid, status);
1233
1234 /* Data errors */
1235 if (!brq->stop.error)
1236 return ERR_CONTINUE;
1237
1238 /* Now for stop errors. These aren't fatal to the transfer. */
1239 pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
1240 req->rq_disk->disk_name, brq->stop.error,
1241 brq->cmd.resp[0], status);
1242
1243 /*
1244 * Substitute in our own stop status as this will give the error
1245 * state which happened during the execution of the r/w command.
1246 */
1247 if (stop_status) {
1248 brq->stop.resp[0] = stop_status;
1249 brq->stop.error = 0;
1250 }
1251 return ERR_CONTINUE;
1252 }
1253
1254 static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1255 int type)
1256 {
1257 int err;
1258
1259 if (md->reset_done & type)
1260 return -EEXIST;
1261
1262 md->reset_done |= type;
1263 err = mmc_hw_reset(host);
1264 /* Ensure we switch back to the correct partition */
1265 if (err != -EOPNOTSUPP) {
1266 struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
1267 int part_err;
1268
1269 main_md->part_curr = main_md->part_type;
1270 part_err = mmc_blk_part_switch(host->card, md);
1271 if (part_err) {
1272 /*
1273 * We have failed to get back into the correct
1274 * partition, so we need to abort the whole request.
1275 */
1276 return -ENODEV;
1277 }
1278 }
1279 return err;
1280 }
1281
1282 static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1283 {
1284 md->reset_done &= ~type;
1285 }
1286
1287 int mmc_access_rpmb(struct mmc_queue *mq)
1288 {
1289 struct mmc_blk_data *md = mq->data;
1290 /*
1291 * If this is an RPMB partition access, return true
1292 */
1293 if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1294 return true;
1295
1296 return false;
1297 }
1298
1299 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1300 {
1301 struct mmc_blk_data *md = mq->data;
1302 struct mmc_card *card = md->queue.card;
1303 unsigned int from, nr, arg;
1304 int err = 0, type = MMC_BLK_DISCARD;
1305
1306 if (!mmc_can_erase(card)) {
1307 err = -EOPNOTSUPP;
1308 goto out;
1309 }
1310
1311 from = blk_rq_pos(req);
1312 nr = blk_rq_sectors(req);
1313
1314 if (mmc_can_discard(card))
1315 arg = MMC_DISCARD_ARG;
1316 else if (mmc_can_trim(card))
1317 arg = MMC_TRIM_ARG;
1318 else
1319 arg = MMC_ERASE_ARG;
1320 retry:
1321 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1322 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1323 INAND_CMD38_ARG_EXT_CSD,
1324 arg == MMC_TRIM_ARG ?
1325 INAND_CMD38_ARG_TRIM :
1326 INAND_CMD38_ARG_ERASE,
1327 0);
1328 if (err)
1329 goto out;
1330 }
1331 err = mmc_erase(card, from, nr, arg);
1332 out:
1333 if (err == -EIO && !mmc_blk_reset(md, card->host, type))
1334 goto retry;
1335 if (!err)
1336 mmc_blk_reset_success(md, type);
1337 blk_end_request(req, err, blk_rq_bytes(req));
1338
1339 return err ? 0 : 1;
1340 }
1341
1342 static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
1343 struct request *req)
1344 {
1345 struct mmc_blk_data *md = mq->data;
1346 struct mmc_card *card = md->queue.card;
1347 unsigned int from, nr, arg, trim_arg, erase_arg;
1348 int err = 0, type = MMC_BLK_SECDISCARD;
1349
1350 if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
1351 err = -EOPNOTSUPP;
1352 goto out;
1353 }
1354
1355 from = blk_rq_pos(req);
1356 nr = blk_rq_sectors(req);
1357
1358 /* The sanitize operation is supported at v4.5 only */
1359 if (mmc_can_sanitize(card)) {
1360 erase_arg = MMC_ERASE_ARG;
1361 trim_arg = MMC_TRIM_ARG;
1362 } else {
1363 erase_arg = MMC_SECURE_ERASE_ARG;
1364 trim_arg = MMC_SECURE_TRIM1_ARG;
1365 }
1366
1367 if (mmc_erase_group_aligned(card, from, nr))
1368 arg = erase_arg;
1369 else if (mmc_can_trim(card))
1370 arg = trim_arg;
1371 else {
1372 err = -EINVAL;
1373 goto out;
1374 }
1375 retry:
1376 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1377 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1378 INAND_CMD38_ARG_EXT_CSD,
1379 arg == MMC_SECURE_TRIM1_ARG ?
1380 INAND_CMD38_ARG_SECTRIM1 :
1381 INAND_CMD38_ARG_SECERASE,
1382 0);
1383 if (err)
1384 goto out_retry;
1385 }
1386
1387 err = mmc_erase(card, from, nr, arg);
1388 if (err == -EIO)
1389 goto out_retry;
1390 if (err)
1391 goto out;
1392
1393 if (arg == MMC_SECURE_TRIM1_ARG) {
1394 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1395 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1396 INAND_CMD38_ARG_EXT_CSD,
1397 INAND_CMD38_ARG_SECTRIM2,
1398 0);
1399 if (err)
1400 goto out_retry;
1401 }
1402
1403 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
1404 if (err == -EIO)
1405 goto out_retry;
1406 if (err)
1407 goto out;
1408 }
1409
1410 if (mmc_can_sanitize(card)) {
1411 trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
1412 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1413 EXT_CSD_SANITIZE_START, 1, 0);
1414 trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
1415 }
1416 out_retry:
1417 if (err && !mmc_blk_reset(md, card->host, type))
1418 goto retry;
1419 if (!err)
1420 mmc_blk_reset_success(md, type);
1421 out:
1422 blk_end_request(req, err, blk_rq_bytes(req));
1423
1424 return err ? 0 : 1;
1425 }
1426
1427 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
1428 {
1429 struct mmc_blk_data *md = mq->data;
1430 struct mmc_card *card = md->queue.card;
1431 int ret = 0;
1432
1433 ret = mmc_flush_cache(card);
1434 if (ret)
1435 ret = -EIO;
1436
1437 blk_end_request_all(req, ret);
1438
1439 return ret ? 0 : 1;
1440 }
1441
1442 /*
1443 * Reformat current write as a reliable write, supporting
1444 * both legacy and the enhanced reliable write MMC cards.
1445 * In each transfer we'll handle only as much as a single
1446 * reliable write can handle, thus finish the request in
1447 * partial completions.
1448 */
1449 static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
1450 struct mmc_card *card,
1451 struct request *req)
1452 {
1453 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
1454 /* Legacy mode imposes restrictions on transfers. */
1455 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
1456 brq->data.blocks = 1;
1457
1458 if (brq->data.blocks > card->ext_csd.rel_sectors)
1459 brq->data.blocks = card->ext_csd.rel_sectors;
1460 else if (brq->data.blocks < card->ext_csd.rel_sectors)
1461 brq->data.blocks = 1;
1462 }
1463 }
1464
1465 #define CMD_ERRORS \
1466 (R1_OUT_OF_RANGE | /* Command argument out of range */ \
1467 R1_ADDRESS_ERROR | /* Misaligned address */ \
1468 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
1469 R1_WP_VIOLATION | /* Tried to write to protected block */ \
1470 R1_CC_ERROR | /* Card controller error */ \
1471 R1_ERROR) /* General/unknown error */
1472
1473 static int mmc_blk_err_check(struct mmc_card *card,
1474 struct mmc_async_req *areq)
1475 {
1476 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
1477 mmc_active);
1478 struct mmc_blk_request *brq = &mq_mrq->brq;
1479 struct request *req = mq_mrq->req;
1480 int ecc_err = 0, gen_err = 0;
1481
1482 /*
1483 * sbc.error indicates a problem with the set block count
1484 * command. No data will have been transferred.
1485 *
1486 * cmd.error indicates a problem with the r/w command. No
1487 * data will have been transferred.
1488 *
1489 * stop.error indicates a problem with the stop command. Data
1490 * may have been transferred, or may still be transferring.
1491 */
1492 if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1493 brq->data.error) {
1494 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
1495 case ERR_RETRY:
1496 return MMC_BLK_RETRY;
1497 case ERR_ABORT:
1498 return MMC_BLK_ABORT;
1499 case ERR_NOMEDIUM:
1500 return MMC_BLK_NOMEDIUM;
1501 case ERR_CONTINUE:
1502 break;
1503 }
1504 }
1505
1506 /*
1507 * Check for errors relating to the execution of the
1508 * initial command - such as address errors. No data
1509 * has been transferred.
1510 */
1511 if (brq->cmd.resp[0] & CMD_ERRORS) {
1512 pr_err("%s: r/w command failed, status = %#x\n",
1513 req->rq_disk->disk_name, brq->cmd.resp[0]);
1514 return MMC_BLK_ABORT;
1515 }
1516
1517 /*
1518 * Everything else is either success, or a data error of some
1519 * kind. If it was a write, we may have transitioned to
1520 * program mode, which we have to wait for it to complete.
1521 */
1522 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1523 u32 status;
1524 unsigned long timeout;
1525
1526 /* Check stop command response */
1527 if (brq->stop.resp[0] & R1_ERROR) {
1528 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1529 req->rq_disk->disk_name, __func__,
1530 brq->stop.resp[0]);
1531 gen_err = 1;
1532 }
1533
1534 timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
1535 do {
1536 int err = get_card_status(card, &status, 5);
1537 if (err) {
1538 pr_err("%s: error %d requesting status\n",
1539 req->rq_disk->disk_name, err);
1540 return MMC_BLK_CMD_ERR;
1541 }
1542
1543 if (status & R1_ERROR) {
1544 pr_err("%s: %s: general error sending status command, card status %#x\n",
1545 req->rq_disk->disk_name, __func__,
1546 status);
1547 gen_err = 1;
1548 }
1549
1550 /* Timeout if the device never becomes ready for data
1551 * and never leaves the program state.
1552 */
1553 if (time_after(jiffies, timeout)) {
1554 pr_err("%s: Card stuck in programming state!"\
1555 " %s %s\n", mmc_hostname(card->host),
1556 req->rq_disk->disk_name, __func__);
1557
1558 return MMC_BLK_CMD_ERR;
1559 }
1560 /*
1561 * Some cards mishandle the status bits,
1562 * so make sure to check both the busy
1563 * indication and the card state.
1564 */
1565 } while (!(status & R1_READY_FOR_DATA) ||
1566 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
1567 }
1568
1569 /* if general error occurs, retry the write operation. */
1570 if (gen_err) {
1571 pr_warn("%s: retrying write for general error\n",
1572 req->rq_disk->disk_name);
1573 return MMC_BLK_RETRY;
1574 }
1575
1576 if (brq->data.error) {
1577 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1578 req->rq_disk->disk_name, brq->data.error,
1579 (unsigned)blk_rq_pos(req),
1580 (unsigned)blk_rq_sectors(req),
1581 brq->cmd.resp[0], brq->stop.resp[0]);
1582
1583 if (rq_data_dir(req) == READ) {
1584 if (ecc_err)
1585 return MMC_BLK_ECC_ERR;
1586 return MMC_BLK_DATA_ERR;
1587 } else {
1588 return MMC_BLK_CMD_ERR;
1589 }
1590 }
1591
1592 if (!brq->data.bytes_xfered)
1593 return MMC_BLK_RETRY;
1594
1595 if (mmc_packed_cmd(mq_mrq->cmd_type)) {
1596 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
1597 return MMC_BLK_PARTIAL;
1598 else
1599 return MMC_BLK_SUCCESS;
1600 }
1601
1602 if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1603 return MMC_BLK_PARTIAL;
1604
1605 return MMC_BLK_SUCCESS;
1606 }
1607
1608 static int mmc_blk_packed_err_check(struct mmc_card *card,
1609 struct mmc_async_req *areq)
1610 {
1611 struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
1612 mmc_active);
1613 struct request *req = mq_rq->req;
1614 struct mmc_packed *packed = mq_rq->packed;
1615 int err, check, status;
1616 u8 *ext_csd;
1617
1618 BUG_ON(!packed);
1619
1620 packed->retries--;
1621 check = mmc_blk_err_check(card, areq);
1622 err = get_card_status(card, &status, 0);
1623 if (err) {
1624 pr_err("%s: error %d sending status command\n",
1625 req->rq_disk->disk_name, err);
1626 return MMC_BLK_ABORT;
1627 }
1628
1629 if (status & R1_EXCEPTION_EVENT) {
1630 ext_csd = kzalloc(512, GFP_KERNEL);
1631 if (!ext_csd) {
1632 pr_err("%s: unable to allocate buffer for ext_csd\n",
1633 req->rq_disk->disk_name);
1634 return -ENOMEM;
1635 }
1636
1637 err = mmc_send_ext_csd(card, ext_csd);
1638 if (err) {
1639 pr_err("%s: error %d sending ext_csd\n",
1640 req->rq_disk->disk_name, err);
1641 check = MMC_BLK_ABORT;
1642 goto free;
1643 }
1644
1645 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
1646 EXT_CSD_PACKED_FAILURE) &&
1647 (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1648 EXT_CSD_PACKED_GENERIC_ERROR)) {
1649 if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1650 EXT_CSD_PACKED_INDEXED_ERROR) {
1651 packed->idx_failure =
1652 ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
1653 check = MMC_BLK_PARTIAL;
1654 }
1655 pr_err("%s: packed cmd failed, nr %u, sectors %u, "
1656 "failure index: %d\n",
1657 req->rq_disk->disk_name, packed->nr_entries,
1658 packed->blocks, packed->idx_failure);
1659 }
1660 free:
1661 kfree(ext_csd);
1662 }
1663
1664 return check;
1665 }
1666
1667 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1668 struct mmc_card *card,
1669 int disable_multi,
1670 struct mmc_queue *mq)
1671 {
1672 u32 readcmd, writecmd;
1673 struct mmc_blk_request *brq = &mqrq->brq;
1674 struct request *req = mqrq->req;
1675 struct mmc_blk_data *md = mq->data;
1676 bool do_data_tag;
1677
1678 /*
1679 * Reliable writes are used to implement Forced Unit Access and
1680 * are supported only on MMCs.
1681 */
1682 bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
1683 (rq_data_dir(req) == WRITE) &&
1684 (md->flags & MMC_BLK_REL_WR);
1685
1686 memset(brq, 0, sizeof(struct mmc_blk_request));
1687 brq->mrq.cmd = &brq->cmd;
1688 brq->mrq.data = &brq->data;
1689
1690 brq->cmd.arg = blk_rq_pos(req);
1691 if (!mmc_card_blockaddr(card))
1692 brq->cmd.arg <<= 9;
1693 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1694 brq->data.blksz = 512;
1695 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1696 brq->stop.arg = 0;
1697 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1698 brq->data.blocks = blk_rq_sectors(req);
1699
1700 /*
1701 * The block layer doesn't support all sector count
1702 * restrictions, so we need to be prepared for too big
1703 * requests.
1704 */
1705 if (brq->data.blocks > card->host->max_blk_count)
1706 brq->data.blocks = card->host->max_blk_count;
1707
1708 if (brq->data.blocks > 1) {
1709 /*
1710 * After a read error, we redo the request one sector
1711 * at a time in order to accurately determine which
1712 * sectors can be read successfully.
1713 */
1714 if (disable_multi)
1715 brq->data.blocks = 1;
1716
1717 /* Some controllers can't do multiblock reads due to hw bugs */
1718 if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
1719 rq_data_dir(req) == READ)
1720 brq->data.blocks = 1;
1721 }
1722
1723 if (brq->data.blocks > 1 || do_rel_wr) {
1724 /* SPI multiblock writes terminate using a special
1725 * token, not a STOP_TRANSMISSION request.
1726 */
1727 if (!mmc_host_is_spi(card->host) ||
1728 rq_data_dir(req) == READ)
1729 brq->mrq.stop = &brq->stop;
1730 readcmd = MMC_READ_MULTIPLE_BLOCK;
1731 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1732 } else {
1733 brq->mrq.stop = NULL;
1734 readcmd = MMC_READ_SINGLE_BLOCK;
1735 writecmd = MMC_WRITE_BLOCK;
1736 }
1737 #ifdef CONFIG_MTK_EMMC_CACHE
1738 /* For non-cacheable system data, the implementation of
1739 * reliable write / forced programming write must be issued
1740 * with the multi-block write command.
1741 */
1742 if (mmc_card_mmc(card) && (card->ext_csd.cache_ctrl & 0x1)){
1743 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1744 }
1745 #endif
1746 if (rq_data_dir(req) == READ) {
1747 brq->cmd.opcode = readcmd;
1748 brq->data.flags |= MMC_DATA_READ;
1749 } else {
1750 brq->cmd.opcode = writecmd;
1751 brq->data.flags |= MMC_DATA_WRITE;
1752 }
1753
1754 if (do_rel_wr)
1755 mmc_apply_rel_rw(brq, card, req);
1756
1757 /*
1758 * Data tag is used only during writing meta data to speed
1759 * up write and any subsequent read of this meta data
1760 */
1761 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1762 (req->cmd_flags & REQ_META) &&
1763 (rq_data_dir(req) == WRITE) &&
1764 ((brq->data.blocks * brq->data.blksz) >=
1765 card->ext_csd.data_tag_unit_size);
1766
1767 /*
1768 * Pre-defined multi-block transfers are preferable to
1769 * open-ended ones (and necessary for reliable writes).
1770 * However, it is not sufficient to just send CMD23,
1771 * and avoid the final CMD12, as on an error condition
1772 * CMD12 (stop) needs to be sent anyway. This, coupled
1773 * with Auto-CMD23 enhancements provided by some
1774 * hosts, means that the complexity of dealing
1775 * with this is best left to the host. If CMD23 is
1776 * supported by card and host, we'll fill sbc in and let
1777 * the host deal with handling it correctly. This means
1778 * that for hosts that don't expose MMC_CAP_CMD23, no
1779 * change of behavior will be observed.
1780 *
1781 * N.B.: Some MMC cards experience performance degradation.
1782 * We'll avoid using CMD23-bounded multiblock writes for
1783 * these, while retaining features like reliable writes.
1784 */
1785 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1786 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1787 do_data_tag)) {
1788 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1789 brq->sbc.arg = brq->data.blocks |
1790 (do_rel_wr ? (1 << 31) : 0) |
1791 (do_data_tag ? (1 << 29) : 0);
1792 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1793 brq->mrq.sbc = &brq->sbc;
1794 }
1795
1796 mmc_set_data_timeout(&brq->data, card);
1797
1798 brq->data.sg = mqrq->sg;
1799 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1800
1801 if (brq->data.sg_len > 1024)
1802 pr_err("%s:%d sglen = %x\n", __func__, __LINE__, brq->data.sg_len);
1803
1804 /*
1805 * Adjust the sg list so it is the same size as the
1806 * request.
1807 */
1808 if (brq->data.blocks != blk_rq_sectors(req)) {
1809 int i, data_size = brq->data.blocks << 9;
1810 struct scatterlist *sg;
1811
1812 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1813 data_size -= sg->length;
1814 if (data_size <= 0) {
1815 sg->length += data_size;
1816 i++;
1817 break;
1818 }
1819 }
1820 brq->data.sg_len = i;
1821 pr_err("%s:%d sglen = %x\n", __func__, __LINE__, brq->data.sg_len);
1822 }
1823
1824 mqrq->mmc_active.mrq = &brq->mrq;
1825 mqrq->mmc_active.err_check = mmc_blk_err_check;
1826
1827 mmc_queue_bounce_pre(mqrq);
1828 }
1829
1830 static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
1831 struct mmc_card *card)
1832 {
1833 unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
1834 unsigned int max_seg_sz = queue_max_segment_size(q);
1835 unsigned int len, nr_segs = 0;
1836
1837 do {
1838 len = min(hdr_sz, max_seg_sz);
1839 hdr_sz -= len;
1840 nr_segs++;
1841 } while (hdr_sz);
1842
1843 return nr_segs;
1844 }
1845
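/*
 * Try to batch consecutive write requests from the queue into one
 * packed command, subject to the card's max_packed_writes and the
 * host's block-count and segment limits. Returns the number of
 * requests packed (including the original one), or 0 if packing is
 * not possible.
 */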
1846 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1847 {
1848 struct request_queue *q = mq->queue;
1849 struct mmc_card *card = mq->card;
1850 struct request *cur = req, *next = NULL;
1851 struct mmc_blk_data *md = mq->data;
1852 struct mmc_queue_req *mqrq = mq->mqrq_cur;
1853 bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
1854 unsigned int req_sectors = 0, phys_segments = 0;
1855 unsigned int max_blk_count, max_phys_segs;
1856 bool put_back = true;
1857 u8 max_packed_rw = 0;
1858 u8 reqs = 0;
1859
1860 if (!(md->flags & MMC_BLK_PACKED_CMD))
1861 goto no_packed;
1862
1863 if ((rq_data_dir(cur) == WRITE) &&
1864 mmc_host_packed_wr(card->host))
1865 max_packed_rw = card->ext_csd.max_packed_writes;
1866
1867 if (max_packed_rw == 0)
1868 goto no_packed;
1869
1870 if (mmc_req_rel_wr(cur) &&
1871 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1872 goto no_packed;
1873
1874 if (mmc_large_sector(card) &&
1875 !IS_ALIGNED(blk_rq_sectors(cur), 8))
1876 goto no_packed;
1877
1878 mmc_blk_clear_packed(mqrq);
1879
1880 max_blk_count = min(card->host->max_blk_count,
1881 card->host->max_req_size >> 9);
1882 if (unlikely(max_blk_count > 0xffff))
1883 max_blk_count = 0xffff;
1884
1885 max_phys_segs = queue_max_segments(q);
1886 req_sectors += blk_rq_sectors(cur);
1887 phys_segments += cur->nr_phys_segments;
1888
1889 if (rq_data_dir(cur) == WRITE) {
1890 req_sectors += mmc_large_sector(card) ? 8 : 1;
1891 phys_segments += mmc_calc_packed_hdr_segs(q, card);
1892 }
1893
1894 do {
1895 if (reqs >= max_packed_rw - 1) {
1896 put_back = false;
1897 break;
1898 }
1899
1900 spin_lock_irq(q->queue_lock);
1901 next = blk_fetch_request(q);
1902 spin_unlock_irq(q->queue_lock);
1903 if (!next) {
1904 put_back = false;
1905 break;
1906 }
1907
1908 if (mmc_large_sector(card) &&
1909 !IS_ALIGNED(blk_rq_sectors(next), 8))
1910 break;
1911
1912 if (next->cmd_flags & REQ_DISCARD ||
1913 next->cmd_flags & REQ_FLUSH)
1914 break;
1915
1916 if (rq_data_dir(cur) != rq_data_dir(next))
1917 break;
1918
1919 if (mmc_req_rel_wr(next) &&
1920 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1921 break;
1922
1923 req_sectors += blk_rq_sectors(next);
1924 if (req_sectors > max_blk_count)
1925 break;
1926
1927 phys_segments += next->nr_phys_segments;
1928 if (phys_segments > max_phys_segs)
1929 break;
1930
1931 list_add_tail(&next->queuelist, &mqrq->packed->list);
1932 cur = next;
1933 reqs++;
1934 } while (1);
1935
1936 if (put_back) {
1937 spin_lock_irq(q->queue_lock);
1938 blk_requeue_request(q, next);
1939 spin_unlock_irq(q->queue_lock);
1940 }
1941
1942 if (reqs > 0) {
1943 list_add(&req->queuelist, &mqrq->packed->list);
1944 mqrq->packed->nr_entries = ++reqs;
1945 mqrq->packed->retries = reqs;
1946 return reqs;
1947 }
1948
1949 no_packed:
1950 mqrq->cmd_type = MMC_PACKED_NONE;
1951 return 0;
1952 }
1953
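/*
 * Build the packed write: fill the packed command header (CMD23 and
 * CMD25 arguments for each queued request) and set up a single
 * CMD23 + CMD25 transfer covering the header plus all data blocks.
 */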
1954 static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1955 struct mmc_card *card,
1956 struct mmc_queue *mq)
1957 {
1958 struct mmc_blk_request *brq = &mqrq->brq;
1959 struct request *req = mqrq->req;
1960 struct request *prq;
1961 struct mmc_blk_data *md = mq->data;
1962 struct mmc_packed *packed = mqrq->packed;
1963 bool do_rel_wr, do_data_tag;
1964 u32 *packed_cmd_hdr;
1965 u8 hdr_blocks;
1966 u8 i = 1;
1967
1968 BUG_ON(!packed);
1969
1970 mqrq->cmd_type = MMC_PACKED_WRITE;
1971 packed->blocks = 0;
1972 packed->idx_failure = MMC_PACKED_NR_IDX;
1973
1974 packed_cmd_hdr = packed->cmd_hdr;
1975 memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
1976 packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
1977 (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
1978 hdr_blocks = mmc_large_sector(card) ? 8 : 1;
1979
1980 /*
1981 * Argument for each entry of packed group
1982 */
1983 list_for_each_entry(prq, &packed->list, queuelist) {
1984 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1985 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1986 (prq->cmd_flags & REQ_META) &&
1987 (rq_data_dir(prq) == WRITE) &&
1988 ((brq->data.blocks * brq->data.blksz) >=
1989 card->ext_csd.data_tag_unit_size);
1990 /* Argument of CMD23 */
1991 packed_cmd_hdr[(i * 2)] = cpu_to_le32(
1992 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1993 (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
1994 blk_rq_sectors(prq));
1995 /* Argument of CMD18 or CMD25 */
1996 packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
1997 mmc_card_blockaddr(card) ?
1998 blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
1999 packed->blocks += blk_rq_sectors(prq);
2000 i++;
2001 }
2002
2003 memset(brq, 0, sizeof(struct mmc_blk_request));
2004 brq->mrq.cmd = &brq->cmd;
2005 brq->mrq.data = &brq->data;
2006 brq->mrq.sbc = &brq->sbc;
2007 brq->mrq.stop = &brq->stop;
2008
2009 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
2010 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
2011 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
2012
2013 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
2014 brq->cmd.arg = blk_rq_pos(req);
2015 if (!mmc_card_blockaddr(card))
2016 brq->cmd.arg <<= 9;
2017 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
2018
2019 brq->data.blksz = 512;
2020 brq->data.blocks = packed->blocks + hdr_blocks;
2021 brq->data.flags |= MMC_DATA_WRITE;
2022
2023 brq->stop.opcode = MMC_STOP_TRANSMISSION;
2024 brq->stop.arg = 0;
2025 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2026
2027 mmc_set_data_timeout(&brq->data, card);
2028
2029 brq->data.sg = mqrq->sg;
2030 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
2031 pr_err("%s: sglen = %d\n", __func__, brq->data.sg_len);
2032
2033 mqrq->mmc_active.mrq = &brq->mrq;
2034 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
2035
2036 mmc_queue_bounce_pre(mqrq);
2037 }
2038
2039 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
2040 struct mmc_blk_request *brq, struct request *req,
2041 int ret)
2042 {
2043 struct mmc_queue_req *mq_rq;
2044 mq_rq = container_of(brq, struct mmc_queue_req, brq);
2045
2046 /*
2047 * If this is an SD card and we're writing, we can first
2048 * mark the known good sectors as ok.
2049 *
2050 * If the card is not SD, we can still mark as ok the written sectors
2051 * as reported by the controller (which might be less than
2052 * the real number of written sectors, but never more).
2053 */
2054 if (mmc_card_sd(card)) {
2055 u32 blocks;
2056
2057 blocks = mmc_sd_num_wr_blocks(card);
2058 if (blocks != (u32)-1) {
2059 ret = blk_end_request(req, 0, blocks << 9);
2060 }
2061 } else {
2062 if (!mmc_packed_cmd(mq_rq->cmd_type))
2063 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
2064 }
2065 return ret;
2066 }
2067
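/*
 * Complete the entries of a packed request.  Entries before the failed
 * index are ended successfully; the failed entry (if any) becomes the
 * current request again and 1 is returned so the caller can retry from
 * that point.  Returns 0 once the whole list has completed.
 */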
2068 static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
2069 {
2070 struct request *prq;
2071 struct mmc_packed *packed = mq_rq->packed;
2072 int idx = packed->idx_failure, i = 0;
2073 int ret = 0;
2074
2075 BUG_ON(!packed);
2076
2077 while (!list_empty(&packed->list)) {
2078 prq = list_entry_rq(packed->list.next);
2079 if (idx == i) {
2080 /* retry from error index */
2081 packed->nr_entries -= idx;
2082 mq_rq->req = prq;
2083 ret = 1;
2084
2085 if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
2086 list_del_init(&prq->queuelist);
2087 mmc_blk_clear_packed(mq_rq);
2088 }
2089 return ret;
2090 }
2091 list_del_init(&prq->queuelist);
2092 blk_end_request(prq, 0, blk_rq_bytes(prq));
2093 i++;
2094 }
2095
2096 mmc_blk_clear_packed(mq_rq);
2097 return ret;
2098 }
2099
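/* Fail every remaining entry of a packed request with -EIO. */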
2100 static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
2101 {
2102 struct request *prq;
2103 struct mmc_packed *packed = mq_rq->packed;
2104
2105 BUG_ON(!packed);
2106
2107 while (!list_empty(&packed->list)) {
2108 prq = list_entry_rq(packed->list.next);
2109 list_del_init(&prq->queuelist);
2110 blk_end_request(prq, -EIO, blk_rq_bytes(prq));
2111 }
2112
2113 mmc_blk_clear_packed(mq_rq);
2114 }
2115
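/*
 * Undo packing: requeue every entry except the first back to the block
 * layer (newest first); the first entry stays as the current request and
 * is reissued by the caller as a normal transfer.
 */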
2116 static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
2117 struct mmc_queue_req *mq_rq)
2118 {
2119 struct request *prq;
2120 struct request_queue *q = mq->queue;
2121 struct mmc_packed *packed = mq_rq->packed;
2122
2123 BUG_ON(!packed);
2124
2125 while (!list_empty(&packed->list)) {
2126 prq = list_entry_rq(packed->list.prev);
2127 if (prq->queuelist.prev != &packed->list) {
2128 list_del_init(&prq->queuelist);
2129 spin_lock_irq(q->queue_lock);
2130 blk_requeue_request(mq->queue, prq);
2131 spin_unlock_irq(q->queue_lock);
2132 } else {
2133 list_del_init(&prq->queuelist);
2134 }
2135 }
2136
2137 mmc_blk_clear_packed(mq_rq);
2138 }
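/*
 * MTK block tag statistics: per mmcqd thread counters used by
 * mmc_blk_issue_rw_rq() below to log workload percentage, read/write
 * throughput and request diversity roughly every PRT_TIME_PERIOD
 * nanoseconds (500 ms).
 */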
2139 #if defined(FEATURE_STORAGE_PERF_INDEX)
2140 #define PRT_TIME_PERIOD 500000000
2141 #define UP_LIMITS_4BYTE 4294967295UL //((4*1024*1024*1024)-1)
2142 #define ID_CNT 10
2143 pid_t mmcqd[ID_CNT]={0};
2144 bool start_async_req[ID_CNT] = {0};
2145 unsigned long long start_async_req_time[ID_CNT] = {0};
2146 static unsigned long long mmcqd_tag_t1[ID_CNT]={0}, mmccid_tag_t1=0;
2147 unsigned long long mmcqd_t_usage_wr[ID_CNT]={0}, mmcqd_t_usage_rd[ID_CNT]={0};
2148 unsigned int mmcqd_rq_size_wr[ID_CNT]={0}, mmcqd_rq_size_rd[ID_CNT]={0};
2149 static unsigned int mmcqd_wr_offset_tag[ID_CNT]={0}, mmcqd_rd_offset_tag[ID_CNT]={0}, mmcqd_wr_offset[ID_CNT]={0}, mmcqd_rd_offset[ID_CNT]={0};
2150 static unsigned int mmcqd_wr_bit[ID_CNT]={0},mmcqd_wr_tract[ID_CNT]={0};
2151 static unsigned int mmcqd_rd_bit[ID_CNT]={0},mmcqd_rd_tract[ID_CNT]={0};
2152 static unsigned int mmcqd_wr_break[ID_CNT]={0}, mmcqd_rd_break[ID_CNT]={0};
2153 unsigned int mmcqd_rq_count[ID_CNT]={0}, mmcqd_wr_rq_count[ID_CNT]={0}, mmcqd_rd_rq_count[ID_CNT]={0};
2154 extern u32 g_u32_cid[4];
2155 #ifdef FEATURE_STORAGE_META_LOG
2156 int check_perdev_minors = CONFIG_MMC_BLOCK_MINORS;
2157 struct metadata_rwlogger metadata_logger[10] = {{{0}}};
2158 #endif
2159
2160 unsigned int mmcqd_work_percent[ID_CNT]={0};
2161 unsigned int mmcqd_w_throughput[ID_CNT]={0};
2162 unsigned int mmcqd_r_throughput[ID_CNT]={0};
2163 unsigned int mmcqd_read_clear[ID_CNT]={0};
2164
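/* Reset the statistics counters for one mmcqd slot. */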
2165 static void g_var_clear(unsigned int idx)
2166 {
2167 mmcqd_t_usage_wr[idx]=0;
2168 mmcqd_t_usage_rd[idx]=0;
2169 mmcqd_rq_size_wr[idx]=0;
2170 mmcqd_rq_size_rd[idx]=0;
2171 mmcqd_rq_count[idx]=0;
2172 mmcqd_wr_offset[idx]=0;
2173 mmcqd_rd_offset[idx]=0;
2174 mmcqd_wr_break[idx]=0;
2175 mmcqd_rd_break[idx]=0;
2176 mmcqd_wr_tract[idx]=0;
2177 mmcqd_wr_bit[idx]=0;
2178 mmcqd_rd_tract[idx]=0;
2179 mmcqd_rd_bit[idx]=0;
2180 mmcqd_wr_rq_count[idx]=0;
2181 mmcqd_rd_rq_count[idx]=0;
2182 }
2183
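/*
 * Map the current mmcqd thread to a slot in the statistics arrays,
 * claiming a free slot (or reusing the last one) on first use.
 */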
2184 unsigned int find_mmcqd_index(void)
2185 {
2186 pid_t mmcqd_pid=0;
2187 unsigned int idx=0;
2188 unsigned char i=0;
2189
2190 mmcqd_pid = task_pid_nr(current);
2191
2192 if(mmcqd[0] ==0) {
2193 mmcqd[0] = mmcqd_pid;
2194 start_async_req[0]=0;
2195 }
2196
2197 for(i=0;i<ID_CNT;i++)
2198 {
2199 if(mmcqd_pid == mmcqd[i])
2200 {
2201 idx=i;
2202 break;
2203 }
2204 if ((mmcqd[i] == 0) ||( i==ID_CNT-1))
2205 {
2206 mmcqd[i]=mmcqd_pid;
2207 start_async_req[i]=0;
2208 idx=i;
2209 break;
2210 }
2211 }
2212 return idx;
2213 }
2214
2215 #endif
2216 //#undef FEATURE_STORAGE_PID_LOGGER
2217 #if defined(FEATURE_STORAGE_PID_LOGGER)
2218
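/*
 * Globals for the MTK per-PID I/O logger.  The per-physical-page array
 * page_logger is allocated lazily in mmc_blk_alloc_req() and protected
 * by g_locker.
 */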
2219 struct struct_pid_logger g_pid_logger[PID_ID_CNT]={{0,0,{0},{0},{0},{0}}};
2220
2221
2222
2223 unsigned char *page_logger = NULL;
2224 spinlock_t g_locker;
2225
2226 #endif
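/*
 * Issue a read/write request using the asynchronous request mechanism:
 * the incoming request (if any) is prepared as a packed or normal
 * transfer and handed to mmc_start_req(), which returns the previously
 * issued request for completion and error handling.  The MTK block tag
 * statistics above are updated along the way.
 */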
2227 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
2228 {
2229 struct mmc_blk_data *md = mq->data;
2230 struct mmc_card *card = md->queue.card;
2231 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
2232 int ret = 1, disable_multi = 0, retry = 0, type;
2233 enum mmc_blk_status status;
2234 struct mmc_queue_req *mq_rq;
2235 struct request *req = rqc;
2236 struct mmc_async_req *areq;
2237 const u8 packed_nr = 2;
2238 u8 reqs = 0;
2239 unsigned long long time1 = 0;
2240 #if defined(FEATURE_STORAGE_PERF_INDEX)
2241 pid_t mmcqd_pid=0;
2242 unsigned long long t_period=0, t_usage=0;
2243 unsigned int t_percent=0;
2244 unsigned int perf_meter=0;
2245 unsigned int rq_byte=0,rq_sector=0,sect_offset=0;
2246 unsigned int diversity=0;
2247 unsigned int idx=0;
2248 #ifdef FEATURE_STORAGE_META_LOG
2249 unsigned int mmcmetaindex=0;
2250 #endif
2251 #endif
2252 #if defined(FEATURE_STORAGE_PID_LOGGER)
2253 unsigned int index=0;
2254 #endif
2255
2256 if (!rqc && !mq->mqrq_prev->req)
2257 return 0;
2258 time1 = sched_clock();
2259
2260 if (rqc)
2261 reqs = mmc_blk_prep_packed_list(mq, rqc);
2262 #if defined(FEATURE_STORAGE_PERF_INDEX)
2263 mmcqd_pid = task_pid_nr(current);
2264
2265 idx = find_mmcqd_index();
2266
2267 mmcqd_read_clear[idx] = 1;
2268 if(mmccid_tag_t1==0)
2269 mmccid_tag_t1 = time1;
2270 t_period = time1 - mmccid_tag_t1;
2271 if(t_period >= (unsigned long long )((PRT_TIME_PERIOD)*(unsigned long long )10))
2272 {
2273 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "MMC Queue Thread:%d, %d, %d, %d, %d \n", mmcqd[0], mmcqd[1], mmcqd[2], mmcqd[3], mmcqd[4]);
2274 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "MMC CID: %x %x %x %x \n", g_u32_cid[0], g_u32_cid[1], g_u32_cid[2], g_u32_cid[3]);
2275 mmccid_tag_t1 = time1;
2276 }
2277 if(mmcqd_tag_t1[idx]==0)
2278 mmcqd_tag_t1[idx] = time1;
2279 t_period = time1 - mmcqd_tag_t1[idx];
2280
2281 if(t_period >= (unsigned long long )PRT_TIME_PERIOD)
2282 {
2283 mmcqd_read_clear[idx] = 2;
2284 mmcqd_work_percent[idx] = 1;
2285 mmcqd_r_throughput[idx] = 0;
2286 mmcqd_w_throughput[idx] = 0;
2287 t_usage = mmcqd_t_usage_wr [idx] + mmcqd_t_usage_rd[idx];
2288 if(t_period > t_usage*100)
2289 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Workload < 1%%, duty %lld, period %lld, req_cnt=%d \n", mmcqd[idx], t_usage, t_period, mmcqd_rq_count[idx]);
2290 else
2291 {
2292 do_div(t_period, 100); //boundary issue
2293 t_percent =((unsigned int)t_usage)/((unsigned int)t_period);
2294 mmcqd_work_percent[idx] = t_percent;
2295 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Workload=%d%%, duty %lld, period %lld00, req_cnt=%d \n", mmcqd[idx], t_percent, t_usage, t_period, mmcqd_rq_count[idx]); //period %lld00 == period %lld x100
2296 }
2297 if(mmcqd_wr_rq_count[idx] >= 2)
2298 {
2299 diversity = mmcqd_wr_offset[idx]/(mmcqd_wr_rq_count[idx]-1);
2300 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Write Diversity=%d sectors offset, req_cnt=%d, break_cnt=%d, tract_cnt=%d, bit_cnt=%d\n", mmcqd[idx], diversity, mmcqd_wr_rq_count[idx], mmcqd_wr_break[idx], mmcqd_wr_tract[idx], mmcqd_wr_bit[idx]);
2301 }
2302 if(mmcqd_rd_rq_count[idx] >= 2)
2303 {
2304 diversity = mmcqd_rd_offset[idx]/(mmcqd_rd_rq_count[idx]-1);
2305 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Read Diversity=%d sectors offset, req_cnt=%d, break_cnt=%d, tract_cnt=%d, bit_cnt=%d\n", mmcqd[idx], diversity, mmcqd_rd_rq_count[idx], mmcqd_rd_break[idx], mmcqd_rd_tract[idx], mmcqd_rd_bit[idx]);
2306 }
2307 if(mmcqd_t_usage_wr[idx])
2308 {
2309 do_div(mmcqd_t_usage_wr[idx], 1000000); //boundary issue
2310 if(mmcqd_t_usage_wr[idx]) // skip the print if the duration is < 1 ms
2311 {
2312 perf_meter = (mmcqd_rq_size_wr[idx])/((unsigned int)mmcqd_t_usage_wr[idx]); //kb/s
2313 mmcqd_w_throughput[idx] = perf_meter;
2314 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Write Throughput=%d kB/s, size: %d bytes, time:%lld ms\n", mmcqd[idx], perf_meter, mmcqd_rq_size_wr[idx], mmcqd_t_usage_wr[idx]);
2315 }
2316 }
2317 if(mmcqd_t_usage_rd[idx])
2318 {
2319 do_div(mmcqd_t_usage_rd[idx], 1000000); //boundary issue
2320 if(mmcqd_t_usage_rd[idx]) // skip the print if the duration is < 1 ms
2321 {
2322 perf_meter = (mmcqd_rq_size_rd[idx])/((unsigned int)mmcqd_t_usage_rd[idx]); //kb/s
2323 mmcqd_r_throughput[idx] = perf_meter;
2324 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Read Throughput=%d kB/s, size: %d bytes, time:%lld ms\n", mmcqd[idx], perf_meter, mmcqd_rq_size_rd[idx], mmcqd_t_usage_rd[idx]);
2325 }
2326 }
2327 mmcqd_tag_t1[idx]=time1;
2328 g_var_clear(idx);
2329 #ifdef FEATURE_STORAGE_META_LOG
2330 mmcmetaindex = mmc_get_devidx(md->disk);
2331 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd metarw WR:%d NWR:%d HR:%d WDR:%d HDR:%d WW:%d NWW:%d HW:%d\n",
2332 metadata_logger[mmcmetaindex].metadata_rw_logger[0], metadata_logger[mmcmetaindex].metadata_rw_logger[1],
2333 metadata_logger[mmcmetaindex].metadata_rw_logger[2], metadata_logger[mmcmetaindex].metadata_rw_logger[3],
2334 metadata_logger[mmcmetaindex].metadata_rw_logger[4], metadata_logger[mmcmetaindex].metadata_rw_logger[5],
2335 metadata_logger[mmcmetaindex].metadata_rw_logger[6], metadata_logger[mmcmetaindex].metadata_rw_logger[7]);
2336 clear_metadata_rw_status(md->disk->first_minor);
2337 #endif
2338 #if defined(FEATURE_STORAGE_PID_LOGGER)
2339 do {
2340 int i;
2341 for(index=0; index<PID_ID_CNT; index++) {
2342
2343 if( g_pid_logger[index].current_pid!=0 && g_pid_logger[index].current_pid == mmcqd_pid)
2344 break;
2345 }
2346 if( index == PID_ID_CNT )
2347 break;
2348 for( i=0; i<PID_LOGGER_COUNT; i++) {
2349 //printk(KERN_INFO"hank mmcqd %d %d", g_pid_logger[index].pid_logger[i], mmcqd_pid);
2350 if( g_pid_logger[index].pid_logger[i] == 0)
2351 break;
2352 sprintf (g_pid_logger[index].pid_buffer+i*37, "{%05d:%05d:%08d:%05d:%08d}", g_pid_logger[index].pid_logger[i], g_pid_logger[index].pid_logger_counter[i], g_pid_logger[index].pid_logger_length[i], g_pid_logger[index].pid_logger_r_counter[i], g_pid_logger[index].pid_logger_r_length[i]);
2353
2354 }
2355 if( i != 0) {
2356 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd pid:%d %s\n", g_pid_logger[index].current_pid, g_pid_logger[index].pid_buffer);
2357 //xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "sizeof(&(g_pid_logger[index].pid_logger)):%d\n", sizeof(unsigned short)*PID_LOGGER_COUNT);
2358 //memset( &(g_pid_logger[index].pid_logger), 0, sizeof(struct struct_pid_logger)-(unsigned long)&(((struct struct_pid_logger *)0)->pid_logger));
2359 memset( &(g_pid_logger[index].pid_logger), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
2360 memset( &(g_pid_logger[index].pid_logger_counter), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
2361 memset( &(g_pid_logger[index].pid_logger_length), 0, sizeof(unsigned int)*PID_LOGGER_COUNT);
2362 memset( &(g_pid_logger[index].pid_logger_r_counter), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
2363 memset( &(g_pid_logger[index].pid_logger_r_length), 0, sizeof(unsigned int)*PID_LOGGER_COUNT);
2364 memset( &(g_pid_logger[index].pid_buffer), 0, sizeof(char)*1024);
2365
2366
2367 }
2368 g_pid_logger[index].pid_buffer[0] = '\0';
2369
2370 } while(0);
2371 #endif
2372
2373 #if defined(FEATURE_STORAGE_VMSTAT_LOGGER)
2374 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "vmstat (FP:%ld)(FD:%ld)(ND:%ld)(WB:%ld)(NW:%ld)\n",
2375 ((global_page_state(NR_FILE_PAGES)) << (PAGE_SHIFT - 10)),
2376 ((global_page_state(NR_FILE_DIRTY)) << (PAGE_SHIFT - 10)),
2377 ((global_page_state(NR_DIRTIED)) << (PAGE_SHIFT - 10)),
2378 ((global_page_state(NR_WRITEBACK)) << (PAGE_SHIFT - 10)),
2379 ((global_page_state(NR_WRITTEN)) << (PAGE_SHIFT - 10)));
2380 #endif
2381
2382 }
2383 if( rqc )
2384 {
2385 rq_byte = blk_rq_bytes(rqc);
2386 rq_sector = blk_rq_sectors(rqc);
2387 if(rq_data_dir(rqc) == WRITE)
2388 {
2389 if(mmcqd_wr_offset_tag[idx]>0)
2390 {
2391 sect_offset = abs(blk_rq_pos(rqc) - mmcqd_wr_offset_tag[idx]);
2392 mmcqd_wr_offset[idx] += sect_offset;
2393 if(sect_offset == 1)
2394 mmcqd_wr_break[idx]++;
2395 }
2396 mmcqd_wr_offset_tag[idx] = blk_rq_pos(rqc) + rq_sector;
2397 if(rq_sector <= 1) //512 bytes
2398 mmcqd_wr_bit[idx] ++;
2399 else if(rq_sector >= 1016) //508kB
2400 mmcqd_wr_tract[idx] ++;
2401 }
2402 else //read
2403 {
2404 if(mmcqd_rd_offset_tag[idx]>0)
2405 {
2406 sect_offset = abs(blk_rq_pos(rqc) - mmcqd_rd_offset_tag[idx]);
2407 mmcqd_rd_offset[idx] += sect_offset;
2408 if(sect_offset == 1)
2409 mmcqd_rd_break[idx]++;
2410 }
2411 mmcqd_rd_offset_tag[idx] = blk_rq_pos(rqc) + rq_sector;
2412 if(rq_sector <= 1) //512 bytes
2413 mmcqd_rd_bit[idx] ++;
2414 else if(rq_sector >= 1016) //508kB
2415 mmcqd_rd_tract[idx] ++;
2416 }
2417 }
2418 #endif
2419 do {
2420 if (rqc) {
2421 /*
2422 * When 4KB native sector size is enabled, only reads and
2423 * writes whose block count is a multiple of 8 are allowed
2424 */
2425 if ((brq->data.blocks & 0x07) &&
2426 (card->ext_csd.data_sector_size == 4096)) {
2427 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
2428 req->rq_disk->disk_name);
2429 mq_rq = mq->mqrq_cur;
2430 goto cmd_abort;
2431 }
2432
2433 if (reqs >= packed_nr)
2434 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
2435 card, mq);
2436 else
2437 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2438 areq = &mq->mqrq_cur->mmc_active;
2439 } else
2440 areq = NULL;
2441 areq = mmc_start_req(card->host, areq, (int *) &status);
2442 if (!areq) {
2443 if (status == MMC_BLK_NEW_REQUEST)
2444 mq->flags |= MMC_QUEUE_NEW_REQUEST;
2445 return 0;
2446 }
2447
2448 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
2449 brq = &mq_rq->brq;
2450 req = mq_rq->req;
2451 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
2452 mmc_queue_bounce_post(mq_rq);
2453
2454 switch (status) {
2455 case MMC_BLK_SUCCESS:
2456 case MMC_BLK_PARTIAL:
2457 /*
2458 * A block was successfully transferred.
2459 */
2460 mmc_blk_reset_success(md, type);
2461
2462 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2463 ret = mmc_blk_end_packed_req(mq_rq);
2464 break;
2465 } else {
2466 ret = blk_end_request(req, 0,
2467 brq->data.bytes_xfered);
2468 }
2469
2470 // if (card && card->host && card->host->areq)
2471 // met_mmc_end(card->host, card->host->areq);
2472
2473 /*
2474 * If the blk_end_request function returns non-zero even
2475 * though all data has been transferred and no errors
2476 * were returned by the host controller, it's a bug.
2477 */
2478 if (status == MMC_BLK_SUCCESS && ret) {
2479 pr_err("%s BUG rq_tot %d d_xfer %d\n",
2480 __func__, blk_rq_bytes(req),
2481 brq->data.bytes_xfered);
2482 rqc = NULL;
2483 goto cmd_abort;
2484 }
2485 break;
2486 case MMC_BLK_CMD_ERR:
2487 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
2488 if (mmc_blk_reset(md, card->host, type))
2489 goto cmd_abort;
2490 if (!ret)
2491 goto start_new_req;
2492 break;
2493 case MMC_BLK_RETRY:
2494 if (retry++ < 5)
2495 break;
2496 /* Fall through */
2497 case MMC_BLK_ABORT:
2498 if (!mmc_blk_reset(md, card->host, type))
2499 break;
2500 goto cmd_abort;
2501 case MMC_BLK_DATA_ERR: {
2502 int err;
2503
2504 err = mmc_blk_reset(md, card->host, type);
2505 if (!err)
2506 break;
2507 if (err == -ENODEV ||
2508 mmc_packed_cmd(mq_rq->cmd_type))
2509 goto cmd_abort;
2510 /* Fall through */
2511 }
2512 case MMC_BLK_ECC_ERR:
2513 if (brq->data.blocks > 1) {
2514 /* Redo read one sector at a time */
2515 pr_warning("%s: retrying using single block read\n",
2516 req->rq_disk->disk_name);
2517 disable_multi = 1;
2518 break;
2519 }
2520 /*
2521 * After an error, we redo I/O one sector at a
2522 * time, so we only reach here after trying to
2523 * read a single sector.
2524 */
2525 ret = blk_end_request(req, -EIO,
2526 brq->data.blksz);
2527 if (!ret)
2528 goto start_new_req;
2529 break;
2530 case MMC_BLK_NOMEDIUM:
2531 goto cmd_abort;
2532 default:
2533 pr_err("%s: Unhandled return value (%d)",
2534 req->rq_disk->disk_name, status);
2535 goto cmd_abort;
2536 }
2537
2538 if (ret) {
2539 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2540 if (!mq_rq->packed->retries)
2541 goto cmd_abort;
2542 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
2543 mmc_start_req(card->host,
2544 &mq_rq->mmc_active, NULL);
2545 } else {
2546
2547 /*
2548 * In case of an incomplete request
2549 * prepare it again and resend.
2550 */
2551 mmc_blk_rw_rq_prep(mq_rq, card,
2552 disable_multi, mq);
2553 mmc_start_req(card->host,
2554 &mq_rq->mmc_active, NULL);
2555 }
2556 }
2557 } while (ret);
2558
2559 return 1;
2560
2561 cmd_abort:
2562 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2563 mmc_blk_abort_packed_req(mq_rq);
2564 } else {
2565 if (mmc_card_removed(card))
2566 req->cmd_flags |= REQ_QUIET;
2567 while (ret)
2568 ret = blk_end_request(req, -EIO,
2569 blk_rq_cur_bytes(req));
2570 }
2571
2572 start_new_req:
2573 if (rqc) {
2574 if (mmc_card_removed(card)) {
2575 rqc->cmd_flags |= REQ_QUIET;
2576 blk_end_request_all(rqc, -EIO);
2577 } else {
2578 /*
2579 * If the current request is packed, it needs to be put back on the queue.
2580 */
2581 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
2582 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
2583
2584 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2585 mmc_start_req(card->host,
2586 &mq->mqrq_cur->mmc_active, NULL);
2587 }
2588 }
2589
2590 return 0;
2591 }
2592
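/*
 * Top-level issue function called from the queue thread: claim the host
 * for the first request, switch to the addressed partition, then
 * dispatch discard/secure discard, flush or read/write handling.  The
 * host is released once no requests remain or a special request has
 * completed.
 */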
2593 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2594 {
2595 int ret;
2596 struct mmc_blk_data *md = mq->data;
2597 struct mmc_card *card = md->queue.card;
2598 struct mmc_host *host = card->host;
2599 unsigned long flags;
2600 unsigned int cmd_flags = req ? req->cmd_flags : 0;
2601
2602 #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
2603 if (mmc_bus_needs_resume(card->host))
2604 mmc_resume_bus(card->host);
2605 #endif
2606
2607 if (req && !mq->mqrq_prev->req)
2608 /* claim host only for the first request */
2609 mmc_claim_host(card->host);
2610
2611 ret = mmc_blk_part_switch(card, md);
2612 if (ret) {
2613 if (req) {
2614 blk_end_request_all(req, -EIO);
2615 }
2616 ret = 0;
2617 goto out;
2618 }
2619
2620 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
2621 if (cmd_flags & REQ_DISCARD) {
2622 /* complete ongoing async transfer before issuing discard */
2623 if (card->host->areq)
2624 mmc_blk_issue_rw_rq(mq, NULL);
2625 if (req->cmd_flags & REQ_SECURE &&
2626 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2627 ret = mmc_blk_issue_secdiscard_rq(mq, req);
2628 else
2629 ret = mmc_blk_issue_discard_rq(mq, req);
2630 } else if (cmd_flags & REQ_FLUSH) {
2631 /* complete ongoing async transfer before issuing flush */
2632 if (card->host->areq)
2633 mmc_blk_issue_rw_rq(mq, NULL);
2634 ret = mmc_blk_issue_flush(mq, req);
2635 } else {
2636 if (!req && host->areq) {
2637 spin_lock_irqsave(&host->context_info.lock, flags);
2638 host->context_info.is_waiting_last_req = true;
2639 spin_unlock_irqrestore(&host->context_info.lock, flags);
2640 }
2641 ret = mmc_blk_issue_rw_rq(mq, req);
2642 }
2643
2644 out:
2645 if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
2646 (cmd_flags & MMC_REQ_SPECIAL_MASK))
2647 /*
2648 * Release host when there are no more requests
2649 * and after a special request (discard, flush) is done.
2650 * In case of a special request, there is no reentry to
2651 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
2652 */
2653 mmc_release_host(card->host);
2654 return ret;
2655 }
2656
2657 static inline int mmc_blk_readonly(struct mmc_card *card)
2658 {
2659 return mmc_card_readonly(card) ||
2660 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2661 }
2662
2663 //#if defined(FEATURE_STORAGE_PID_LOGGER)
2664 //extern unsigned long get_memory_size(void);
2665 //#endif
2666 #ifdef CONFIG_MTK_EXTMEM
2667 extern void* extmem_malloc_page_align(size_t bytes);
2668 #endif
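/*
 * Allocate an mmc_blk_data together with its gendisk and request queue
 * for one card area (main user area, boot, RPMB or general purpose
 * partition), and set up the logical block size and the CMD23, reliable
 * write and packed command capability flags.
 */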
2669 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2670 struct device *parent,
2671 sector_t size,
2672 bool default_ro,
2673 const char *subname,
2674 int area_type)
2675 {
2676 struct mmc_blk_data *md;
2677 int devidx, ret;
2678
2679 devidx = find_first_zero_bit(dev_use, max_devices);
2680 if (devidx >= max_devices)
2681 return ERR_PTR(-ENOSPC);
2682 __set_bit(devidx, dev_use);
2683
2684 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
2685 if (!md) {
2686 ret = -ENOMEM;
2687 goto out;
2688 }
2689
2690 /*
2691 * !subname implies we are creating main mmc_blk_data that will be
2692 * associated with mmc_card with mmc_set_drvdata. Due to device
2693 * partitions, devidx will not coincide with a per-physical card
2694 * index anymore so we keep track of a name index.
2695 */
2696 if (!subname) {
2697 md->name_idx = find_first_zero_bit(name_use, max_devices);
2698 __set_bit(md->name_idx, name_use);
2699 } else
2700 md->name_idx = ((struct mmc_blk_data *)
2701 dev_to_disk(parent)->private_data)->name_idx;
2702
2703 md->area_type = area_type;
2704
2705 /*
2706 * Set the read-only status based on the supported commands
2707 * and the write protect switch.
2708 */
2709 md->read_only = mmc_blk_readonly(card);
2710
2711 md->disk = alloc_disk(perdev_minors);
2712 if (md->disk == NULL) {
2713 ret = -ENOMEM;
2714 goto err_kfree;
2715 }
2716
2717 spin_lock_init(&md->lock);
2718 INIT_LIST_HEAD(&md->part);
2719 md->usage = 1;
2720
2721 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
2722 if (ret)
2723 goto err_putdisk;
2724 #if defined(FEATURE_STORAGE_PID_LOGGER)
2725 if( !page_logger){
2726 //num_page_logger = sizeof(struct page_pid_logger);
2727 //page_logger = vmalloc(num_physpages*sizeof(struct page_pid_logger));
2728 // obtain the number of pages from start pfn to max pfn via get_max_DRAM_size()
2729
2730 //unsigned long count = get_memory_size() >> PAGE_SHIFT;
2731 unsigned long count = get_max_DRAM_size() >> PAGE_SHIFT;
2732 #ifdef CONFIG_MTK_EXTMEM
2733 page_logger = extmem_malloc_page_align(count * sizeof(struct page_pid_logger));
2734 #else
2735 page_logger = vmalloc(count * sizeof(struct page_pid_logger));
2736 #endif
2737 if( page_logger) {
2738 memset( page_logger, -1, count*sizeof( struct page_pid_logger));
2739 }
2740 spin_lock_init(&g_locker);
2741 }
2742 #endif
2743 #if defined(FEATURE_STORAGE_META_LOG)
2744 check_perdev_minors = perdev_minors;
2745 #endif
2746
2747 md->queue.issue_fn = mmc_blk_issue_rq;
2748 md->queue.data = md;
2749
2750 md->disk->major = MMC_BLOCK_MAJOR;
2751 md->disk->first_minor = devidx * perdev_minors;
2752 md->disk->fops = &mmc_bdops;
2753 md->disk->private_data = md;
2754 md->disk->queue = md->queue.queue;
2755 md->disk->driverfs_dev = parent;
2756 set_disk_ro(md->disk, md->read_only || default_ro);
2757 md->disk->flags = GENHD_FL_EXT_DEVT;
2758 if (area_type & MMC_BLK_DATA_AREA_RPMB)
2759 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
2760
2761 /*
2762 * As discussed on lkml, GENHD_FL_REMOVABLE should:
2763 *
2764 * - be set for removable media with permanent block devices
2765 * - be unset for removable block devices with permanent media
2766 *
2767 * Since MMC block devices clearly fall under the second
2768 * case, we do not set GENHD_FL_REMOVABLE. Userspace
2769 * should use the block device creation/destruction hotplug
2770 * messages to tell when the card is present.
2771 */
2772
2773 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2774 "mmcblk%d%s", md->name_idx, subname ? subname : "");
2775
2776 if (mmc_card_mmc(card))
2777 blk_queue_logical_block_size(md->queue.queue,
2778 card->ext_csd.data_sector_size);
2779 else
2780 blk_queue_logical_block_size(md->queue.queue, 512);
2781
2782 set_capacity(md->disk, size);
2783
2784 if (mmc_host_cmd23(card->host)) {
2785 if ((mmc_card_mmc(card) &&
2786 card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
2787 (mmc_card_sd(card) &&
2788 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2789 md->flags |= MMC_BLK_CMD23;
2790 }
2791
2792 if (mmc_card_mmc(card) &&
2793 md->flags & MMC_BLK_CMD23 &&
2794 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2795 card->ext_csd.rel_sectors)) {
2796 md->flags |= MMC_BLK_REL_WR;
2797 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
2798 }
2799
2800 if (mmc_card_mmc(card) &&
2801 (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2802 (md->flags & MMC_BLK_CMD23) &&
2803 card->ext_csd.packed_event_en) {
2804 if (!mmc_packed_init(&md->queue, card))
2805 md->flags |= MMC_BLK_PACKED_CMD;
2806 }
2807
2808 return md;
2809
2810 err_putdisk:
2811 put_disk(md->disk);
2812 err_kfree:
2813 kfree(md);
2814 out:
2815 return ERR_PTR(ret);
2816 }
2817
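/*
 * Allocate the block device for the main user data area.  Capacity comes
 * from EXT_CSD for block-addressed (high capacity) cards or from the CSD
 * otherwise; on MTK eMMC the vendor reserved region is subtracted.
 */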
2818 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2819 {
2820 sector_t size;
2821 #ifdef CONFIG_MTK_EMMC_SUPPORT
2822 unsigned int l_reserve;
2823 struct storage_info s_info = {0};
2824 #endif
2825 struct mmc_blk_data *md;
2826
2827 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2828 /*
2829 * The EXT_CSD sector count is in units of 512 byte
2830 * sectors.
2831 */
2832 size = card->ext_csd.sectors;
2833 } else {
2834 /*
2835 * The CSD capacity field is in units of read_blkbits.
2836 * set_capacity takes units of 512 bytes.
2837 */
2838 size = card->csd.capacity << (card->csd.read_blkbits - 9);
2839 }
2840
2841 if(!mmc_card_sd(card)){
2842 #ifdef CONFIG_MTK_EMMC_SUPPORT
2843 msdc_get_info(EMMC_CARD_BOOT, EMMC_RESERVE, &s_info);
2844 l_reserve = s_info.emmc_reserve;
2845 printk("l_reserve = 0x%x\n", l_reserve);
2846 size -= l_reserve; /*reserved for 64MB (emmc otp + emmc combo offset + reserved)*/
2847 #endif
2848 }
2849 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2850 MMC_BLK_DATA_AREA_MAIN);
2851 return md;
2852 }
2853
2854 static int mmc_blk_alloc_part(struct mmc_card *card,
2855 struct mmc_blk_data *md,
2856 unsigned int part_type,
2857 sector_t size,
2858 bool default_ro,
2859 const char *subname,
2860 int area_type)
2861 {
2862 char cap_str[10];
2863 struct mmc_blk_data *part_md;
2864
2865 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
2866 subname, area_type);
2867 if (IS_ERR(part_md))
2868 return PTR_ERR(part_md);
2869 part_md->part_type = part_type;
2870 list_add(&part_md->part, &md->part);
2871
2872 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
2873 cap_str, sizeof(cap_str));
2874 pr_info("%s: %s %s partition %u %s\n",
2875 part_md->disk->disk_name, mmc_card_id(card),
2876 mmc_card_name(card), part_md->part_type, cap_str);
2877 return 0;
2878 }
2879
2880 /* MMC Physical partitions consist of two boot partitions and
2881 * up to four general purpose partitions.
2882 * For each partition enabled in EXT_CSD a block device will be allocated
2883 * to provide access to the partition.
2884 */
2885
2886 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2887 {
2888 int idx, ret = 0;
2889
2890 if (!mmc_card_mmc(card))
2891 return 0;
2892
2893 for (idx = 0; idx < card->nr_parts; idx++) {
2894 if (card->part[idx].size) {
2895 ret = mmc_blk_alloc_part(card, md,
2896 card->part[idx].part_cfg,
2897 card->part[idx].size >> 9,
2898 card->part[idx].force_ro,
2899 card->part[idx].name,
2900 card->part[idx].area_type);
2901 if (ret)
2902 return ret;
2903 }
2904 }
2905
2906 return ret;
2907 }
2908
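/*
 * Tear down one block device: remove its sysfs attributes, delete the
 * gendisk so no new requests arrive, drain and clean up the queue, then
 * drop the reference.
 */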
2909 static void mmc_blk_remove_req(struct mmc_blk_data *md)
2910 {
2911 struct mmc_card *card;
2912
2913 if (md) {
2914 card = md->queue.card;
2915 if (md->disk->flags & GENHD_FL_UP) {
2916 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2917 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2918 card->ext_csd.boot_ro_lockable)
2919 device_remove_file(disk_to_dev(md->disk),
2920 &md->power_ro_lock);
2921
2922 /* Stop new requests from getting into the queue */
2923 del_gendisk(md->disk);
2924 }
2925
2926 /* Then flush out any already in there */
2927 mmc_cleanup_queue(&md->queue);
2928 if (md->flags & MMC_BLK_PACKED_CMD)
2929 mmc_packed_clean(&md->queue);
2930 mmc_blk_put(md);
2931 }
2932 }
2933
2934 static void mmc_blk_remove_parts(struct mmc_card *card,
2935 struct mmc_blk_data *md)
2936 {
2937 struct list_head *pos, *q;
2938 struct mmc_blk_data *part_md;
2939
2940 __clear_bit(md->name_idx, name_use);
2941 list_for_each_safe(pos, q, &md->part) {
2942 part_md = list_entry(pos, struct mmc_blk_data, part);
2943 list_del(pos);
2944 mmc_blk_remove_req(part_md);
2945 }
2946 }
2947
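/*
 * Register the gendisk and create the force_ro attribute; boot areas that
 * support power-on write protect also get ro_lock_until_next_power_on.
 */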
2948 static int mmc_add_disk(struct mmc_blk_data *md)
2949 {
2950 int ret;
2951 struct mmc_card *card = md->queue.card;
2952
2953 add_disk(md->disk);
2954 md->force_ro.show = force_ro_show;
2955 md->force_ro.store = force_ro_store;
2956 sysfs_attr_init(&md->force_ro.attr);
2957 md->force_ro.attr.name = "force_ro";
2958 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2959 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2960 if (ret)
2961 goto force_ro_fail;
2962
2963 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2964 card->ext_csd.boot_ro_lockable) {
2965 umode_t mode;
2966
2967 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2968 mode = S_IRUGO;
2969 else
2970 mode = S_IRUGO | S_IWUSR;
2971
2972 md->power_ro_lock.show = power_ro_lock_show;
2973 md->power_ro_lock.store = power_ro_lock_store;
2974 sysfs_attr_init(&md->power_ro_lock.attr);
2975 md->power_ro_lock.attr.mode = mode;
2976 md->power_ro_lock.attr.name =
2977 "ro_lock_until_next_power_on";
2978 ret = device_create_file(disk_to_dev(md->disk),
2979 &md->power_ro_lock);
2980 if (ret)
2981 goto power_ro_lock_fail;
2982 }
2983 return ret;
2984
2985 power_ro_lock_fail:
2986 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2987 force_ro_fail:
2988 del_gendisk(md->disk);
2989
2990 return ret;
2991 }
2992
2993 #define CID_MANFID_SANDISK 0x2
2994 #define CID_MANFID_TOSHIBA 0x11
2995 #define CID_MANFID_MICRON 0x13
2996 #define CID_MANFID_SAMSUNG 0x15
2997 #define CID_MANFID_SANDISK_NEW 0x45
2998 #define CID_MANFID_HYNIX 0x90
2999 #define CID_MANFID_KSI 0x70
3000
3001 static const struct mmc_fixup blk_fixups[] =
3002 {
3003 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
3004 MMC_QUIRK_INAND_CMD38),
3005 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
3006 MMC_QUIRK_INAND_CMD38),
3007 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
3008 MMC_QUIRK_INAND_CMD38),
3009 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
3010 MMC_QUIRK_INAND_CMD38),
3011 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
3012 MMC_QUIRK_INAND_CMD38),
3013 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_NEW, CID_OEMID_ANY, add_quirk,
3014 MMC_QUIRK_PON),
3015 /*
3016 * Some MMC cards experience performance degradation with CMD23
3017 * instead of CMD12-bounded multiblock transfers. For now we'll
3018 * black list what's bad...
3019 * - Certain Toshiba cards.
3020 *
3021 * N.B. This doesn't affect SD cards.
3022 */
3023 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3024 MMC_QUIRK_BLK_NO_CMD23),
3025 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3026 MMC_QUIRK_BLK_NO_CMD23),
3027 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3028 MMC_QUIRK_BLK_NO_CMD23),
3029
3030 /*
3031 * Some MMC cards need longer data read timeout than indicated in CSD.
3032 */
3033 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
3034 MMC_QUIRK_LONG_READ_TIME),
3035 MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3036 MMC_QUIRK_LONG_READ_TIME),
3037
3038 /*
3039 * On these Samsung MoviNAND parts, performing secure erase or
3040 * secure trim can result in unrecoverable corruption due to a
3041 * firmware bug.
3042 */
3043 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3044 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3045 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3046 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3047 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3048 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3049 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3050 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3051 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3052 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3053 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3054 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3055 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3056 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3057 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3058 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3059 #ifdef CONFIG_MTK_EMMC_CACHE
3060 /*
3061 * Some MMC cards' cache feature cannot flush previously cached data by forced programming
3062 * or reliable write, so the strong ordering between metadata and file data cannot be guaranteed.
3063 */
3064
3065 /*
3066 * On Toshiba eMMC, enabling the cache feature hurts write performance because the flush operation takes a long time
3067 */
3068 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3069 MMC_QUIRK_DISABLE_CACHE),
3070 #endif
3071
3072 /* Hynix 4.41 trim can lead to boot failure. */
3073 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY, add_quirk_mmc,
3074 MMC_QUIRK_TRIM_UNSTABLE),
3075
3076 /* KSI PRV=0x3 trim leads to a write performance drop. */
3077 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_KSI, CID_OEMID_ANY, add_quirk_mmc_ksi_v03_skip_trim,
3078 MMC_QUIRK_KSI_V03_SKIP_TRIM),
3079
3080 END_FIXUP
3081 };
3082
3083 #if defined(CONFIG_MTK_EMMC_SUPPORT) && !defined(CONFIG_MTK_GPT_SCHEME_SUPPORT)
3084 extern void emmc_create_sys_symlink (struct mmc_card *card);
3085 #endif
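/*
 * Driver probe: allocate the main block device and any hardware
 * partitions, apply per-card quirks from blk_fixups, and register the
 * disks with the block layer.
 */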
3086 static int mmc_blk_probe(struct mmc_card *card)
3087 {
3088 struct mmc_blk_data *md, *part_md;
3089 char cap_str[10];
3090
3091 /*
3092 * Check that the card supports the command class(es) we need.
3093 */
3094 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
3095 return -ENODEV;
3096
3097 md = mmc_blk_alloc(card);
3098 if (IS_ERR(md))
3099 return PTR_ERR(md);
3100
3101 string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
3102 cap_str, sizeof(cap_str));
3103 pr_info("%s: %s %s %s %s\n",
3104 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
3105 cap_str, md->read_only ? "(ro)" : "");
3106
3107 if (mmc_blk_alloc_parts(card, md))
3108 goto out;
3109
3110 mmc_set_drvdata(card, md);
3111 mmc_fixup_device(card, blk_fixups);
3112
3113 printk("[%s]: %s by manufacturer settings, quirks=0x%x\n", __func__, md->disk->disk_name, card->quirks);
3114
3115 #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
3116 mmc_set_bus_resume_policy(card->host, 1);
3117 #endif
3118 if (mmc_add_disk(md))
3119 goto out;
3120
3121 list_for_each_entry(part_md, &md->part, part) {
3122 if (mmc_add_disk(part_md))
3123 goto out;
3124 }
3125 #if defined(CONFIG_MTK_EMMC_SUPPORT) && !defined(CONFIG_MTK_GPT_SCHEME_SUPPORT)
3126 emmc_create_sys_symlink(card);
3127 #endif
3128 return 0;
3129
3130 out:
3131 mmc_blk_remove_parts(card, md);
3132 mmc_blk_remove_req(md);
3133 return 0;
3134 }
3135
3136 static void mmc_blk_remove(struct mmc_card *card)
3137 {
3138 struct mmc_blk_data *md = mmc_get_drvdata(card);
3139
3140 mmc_blk_remove_parts(card, md);
3141 mmc_claim_host(card->host);
3142 mmc_blk_part_switch(card, md);
3143 mmc_release_host(card->host);
3144 mmc_blk_remove_req(md);
3145 mmc_set_drvdata(card, NULL);
3146 #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
3147 mmc_set_bus_resume_policy(card->host, 0);
3148 #endif
3149 }
3150
3151 #ifdef CONFIG_PM
3152 static int mmc_blk_suspend(struct mmc_card *card)
3153 {
3154 struct mmc_blk_data *part_md;
3155 struct mmc_blk_data *md = mmc_get_drvdata(card);
3156
3157 if (md) {
3158 mmc_queue_suspend(&md->queue);
3159 list_for_each_entry(part_md, &md->part, part) {
3160 mmc_queue_suspend(&part_md->queue);
3161 }
3162 }
3163 return 0;
3164 }
3165
3166 static int mmc_blk_resume(struct mmc_card *card)
3167 {
3168 struct mmc_blk_data *part_md;
3169 struct mmc_blk_data *md = mmc_get_drvdata(card);
3170
3171 if (md) {
3172 /*
3173 * Resume involves the card going into idle state,
3174 * so current partition is always the main one.
3175 */
3176 md->part_curr = md->part_type;
3177 mmc_queue_resume(&md->queue);
3178 list_for_each_entry(part_md, &md->part, part) {
3179 mmc_queue_resume(&part_md->queue);
3180 }
3181 }
3182 return 0;
3183 }
3184 #else
3185 #define mmc_blk_suspend NULL
3186 #define mmc_blk_resume NULL
3187 #endif
3188
3189 static struct mmc_driver mmc_driver = {
3190 .drv = {
3191 .name = "mmcblk",
3192 },
3193 .probe = mmc_blk_probe,
3194 .remove = mmc_blk_remove,
3195 .suspend = mmc_blk_suspend,
3196 .resume = mmc_blk_resume,
3197 };
3198
3199 static int __init mmc_blk_init(void)
3200 {
3201 int res;
3202
3203 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
3204 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
3205
3206 max_devices = 256 / perdev_minors;
3207
3208 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
3209 if (res)
3210 goto out;
3211
3212 res = mmc_register_driver(&mmc_driver);
3213 if (res)
3214 goto out2;
3215
3216 return 0;
3217 out2:
3218 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
3219 out:
3220 return res;
3221 }
3222
3223 static void __exit mmc_blk_exit(void)
3224 {
3225 mmc_unregister_driver(&mmc_driver);
3226 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
3227 }
3228
3229 module_init(mmc_blk_init);
3230 module_exit(mmc_blk_exit);
3231
3232 MODULE_LICENSE("GPL");
3233 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
3234