1 /*
2 * Block driver for media (i.e., flash cards)
3 *
4 * Copyright 2002 Hewlett-Packard Company
5 * Copyright 2005-2008 Pierre Ossman
6 *
7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is
9 * preserved in its entirety in all copies and derived works.
10 *
11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13 * FITNESS FOR ANY PARTICULAR PURPOSE.
14 *
15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
16 *
17 * Author: Andrew Christian
18 * 28 May 2002
19 */
20 #include <linux/moduleparam.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23
24 #include <linux/kernel.h>
25 #include <linux/fs.h>
26 #include <linux/slab.h>
27 #include <linux/errno.h>
28 #include <linux/hdreg.h>
29 #include <linux/kdev_t.h>
30 #include <linux/blkdev.h>
31 #include <linux/mutex.h>
32 #include <linux/scatterlist.h>
33 #include <linux/string_helpers.h>
34 #include <linux/delay.h>
35 #include <linux/capability.h>
36 #include <linux/compat.h>
37
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/mmc.h>
40
41 #include <linux/mmc/ioctl.h>
42 #include <linux/mmc/card.h>
43 #include <linux/mmc/host.h>
44 #include <linux/mmc/mmc.h>
45 #include <linux/mmc/sd.h>
46
47 #include <asm/uaccess.h>
48
49 #include "queue.h"
50 #include <mach/mtk_meminfo.h>
51
52 // Add vmstat info to the block tag log
53 #include <linux/vmstat.h>
54 #define FEATURE_STORAGE_VMSTAT_LOGGER
55
56
57 #include <linux/xlog.h>
58 #include <asm/div64.h>
59 #include <linux/vmalloc.h>
60
61 #include <linux/mmc/sd_misc.h>
62
63 #define MET_USER_EVENT_SUPPORT
64 #include <linux/met_drv.h>
65
66 #define FEATURE_STORAGE_PERF_INDEX
67 // Enable storage log in user builds
68 #if 0
69 #ifdef USER_BUILD_KERNEL
70 #undef FEATURE_STORAGE_PERF_INDEX
71 #endif
72 #endif
73
74 MODULE_ALIAS("mmc:block");
75 #ifdef MODULE_PARAM_PREFIX
76 #undef MODULE_PARAM_PREFIX
77 #endif
78 #define MODULE_PARAM_PREFIX "mmcblk."
79
80 #define INAND_CMD38_ARG_EXT_CSD 113
81 #define INAND_CMD38_ARG_ERASE 0x00
82 #define INAND_CMD38_ARG_TRIM 0x01
83 #define INAND_CMD38_ARG_SECERASE 0x80
84 #define INAND_CMD38_ARG_SECTRIM1 0x81
85 #define INAND_CMD38_ARG_SECTRIM2 0x88
86 #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
87
88 #define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
89 (req->cmd_flags & REQ_META)) && \
90 (rq_data_dir(req) == WRITE))
91 #define PACKED_CMD_VER 0x01
92 #define PACKED_CMD_WR 0x02
93
94 static DEFINE_MUTEX(block_mutex);
95
96 /*
97 * The defaults come from config options but can be overridden by module
98 * or bootarg options.
99 */
100 static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
101
102 /*
103 * We've only got one major, so number of mmcblk devices is
104 * limited to 256 / number of minors per device.
105 */
106 static int max_devices;
107
108 /* 256 minors, so at most 256 separate devices */
109 static DECLARE_BITMAP(dev_use, 256);
110 static DECLARE_BITMAP(name_use, 256);
111
112 /*
113 * There is one mmc_blk_data per slot.
114 */
115 struct mmc_blk_data {
116 spinlock_t lock;
117 struct gendisk *disk;
118 struct mmc_queue queue;
119 struct list_head part;
120
121 unsigned int flags;
122 #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
123 #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
124 #define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
125
126 unsigned int usage;
127 unsigned int read_only;
128 unsigned int part_type;
129 unsigned int name_idx;
130 unsigned int reset_done;
131 #define MMC_BLK_READ BIT(0)
132 #define MMC_BLK_WRITE BIT(1)
133 #define MMC_BLK_DISCARD BIT(2)
134 #define MMC_BLK_SECDISCARD BIT(3)
135
136 /*
137 * Only set in main mmc_blk_data associated
138 * with mmc_card with mmc_set_drvdata, and keeps
139 * track of the current selected device partition.
140 */
141 unsigned int part_curr;
142 struct device_attribute force_ro;
143 struct device_attribute power_ro_lock;
144 int area_type;
145 };
146
147 static DEFINE_MUTEX(open_lock);
148
149 enum {
150 MMC_PACKED_NR_IDX = -1,
151 MMC_PACKED_NR_ZERO,
152 MMC_PACKED_NR_SINGLE,
153 };
154
155 module_param(perdev_minors, int, 0444);
156 MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
157
158 static inline int mmc_blk_part_switch(struct mmc_card *card,
159 struct mmc_blk_data *md);
160 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
161
162 #ifndef CONFIG_MTK_FPGA
163 #include <linux/met_ftrace_bio.h>
164 #endif
165
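/*
 * Map an MMC opcode to a single-character transfer type for the MET
 * trace hooks below: 'R' for reads, 'W' for writes, 'X' for anything
 * the tracer does not care about.
 */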
166 char mmc_get_rw_type(u32 opcode)
167 {
168 switch (opcode)
169 {
170 case MMC_READ_SINGLE_BLOCK:
171 case MMC_READ_MULTIPLE_BLOCK:
172 return 'R';
173 case MMC_WRITE_BLOCK:
174 case MMC_WRITE_MULTIPLE_BLOCK:
175 return 'W';
176 default:
177 // Unknown opcode!!!
178 return 'X';
179 }
180 }
181
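/*
 * Validation helpers for the MET trace hooks: a hook only emits a
 * trace event when the host, request and attached card pointers it
 * is about to dereference are all non-NULL.
 */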
182 inline int check_met_mmc_async_req_legal(struct mmc_host *host, struct mmc_async_req *areq)
183 {
184 int is_legal = 0;
185
186 if (!((host == NULL) || (areq == NULL) || (areq->mrq == NULL)
187 || (areq->mrq->cmd == NULL) || (areq->mrq->data == NULL)
188 || (host->card == NULL))) {
189 is_legal = 1;
190 }
191
192 return is_legal;
193 }
194
195 inline int check_met_mmc_blk_data_legal(struct mmc_blk_data *md)
196 {
197 int is_legal = 0;
198
199 if (!((md == NULL) || (md->disk == NULL))) {
200 is_legal = 1;
201 }
202
203 return is_legal;
204 }
205
206 inline int check_met_mmc_req_legal(struct mmc_host *host, struct mmc_request *req)
207 {
208 int is_legal = 0;
209
210 if (!((host == NULL) || (req == NULL) || (req->cmd == NULL)
211 || (req->data == NULL) || (host->card == NULL))) {
212 is_legal = 1;
213 }
214
215 return is_legal;
216 }
217
218 void met_mmc_insert(struct mmc_host *host, struct mmc_async_req *areq)
219 {
220 struct mmc_blk_data *md;
221 char type;
222
223 if (!check_met_mmc_async_req_legal(host, areq))
224 return;
225
226 md = mmc_get_drvdata(host->card);
227 if (!check_met_mmc_blk_data_legal(md))
228 return;
229
230 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
231 if (type == 'X')
232 return;
233
234 #ifndef CONFIG_MTK_FPGA
235 MET_FTRACE_PRINTK(met_mmc_insert, md, areq, type);
236 #endif
237 }
238
239 void met_mmc_dma_map(struct mmc_host *host, struct mmc_async_req *areq)
240 {
241 struct mmc_blk_data *md;
242 char type;
243
244 if (!check_met_mmc_async_req_legal(host, areq))
245 return;
246
247 md = mmc_get_drvdata(host->card);
248 if (!check_met_mmc_blk_data_legal(md))
249 return;
250
251 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
252 if (type == 'X')
253 return;
254 #ifndef CONFIG_MTK_FPGA
255 MET_FTRACE_PRINTK(met_mmc_dma_map, md, areq, type);
256 #endif
257 }
258
276 void met_mmc_issue(struct mmc_host *host, struct mmc_request *req)
277 {
278 struct mmc_blk_data *md;
279 char type;
280
281 if (!check_met_mmc_req_legal(host, req))
282 return;
283
284 md = mmc_get_drvdata(host->card);
285 if (!check_met_mmc_blk_data_legal(md))
286 return;
287
288 type = mmc_get_rw_type(req->cmd->opcode);
289 if (type == 'X')
290 return;
291 #ifndef CONFIG_MTK_FPGA
292 MET_FTRACE_PRINTK(met_mmc_issue, md, req, type);
293 #endif
294 }
295
296 void met_mmc_send_cmd(struct mmc_host *host, struct mmc_command *cmd)
297 {
298 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
299 char type;
300
301 type = mmc_get_rw_type(cmd->opcode);
302 if (type == 'X')
303 return;
304
305 trace_printk("%d,%d %c %d + %d [%s]\n",
306 md->disk->major, md->disk->first_minor, type,
307 cmd->arg, cmd->data->blocks,
308 current->comm);
309 }
310
311 void met_mmc_xfr_done(struct mmc_host *host, struct mmc_command *cmd)
312 {
313 struct mmc_blk_data *md=mmc_get_drvdata(host->card);
314 char type;
315
316 type = mmc_get_rw_type(cmd->opcode);
317 if (type == 'X')
318 return;
319
320 trace_printk("%d,%d %c %d + %d [%s]\n",
321 md->disk->major, md->disk->first_minor, type,
322 cmd->arg, cmd->data->blocks,
323 current->comm);
324 }
325
326 void met_mmc_wait_xfr(struct mmc_host *host, struct mmc_async_req *areq)
327 {
328 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
329 char type;
330
331 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
332 if (type == 'X')
333 return;
334
335 trace_printk("%d,%d %c %d + %d [%s]\n",
336 md->disk->major, md->disk->first_minor, type,
337 areq->mrq->cmd->arg, areq->mrq->data->blocks,
338 current->comm);
339
340 }
341
342 void met_mmc_tuning_start(struct mmc_host *host, struct mmc_command *cmd)
343 {
344 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
345 char type;
346
347 type = mmc_get_rw_type(cmd->opcode);
348 if (type == 'X')
349 return;
350
351 trace_printk("%d,%d %c %d + %d [%s]\n",
352 md->disk->major, md->disk->first_minor, type,
353 cmd->arg, cmd->data->blocks,
354 current->comm);
355 }
356
357 void met_mmc_tuning_end(struct mmc_host *host, struct mmc_command *cmd)
358 {
359 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
360 char type;
361
362 type = mmc_get_rw_type(cmd->opcode);
363 if (type == 'X')
364 return;
365
366 trace_printk("%d,%d %c %d + %d [%s]\n",
367 md->disk->major, md->disk->first_minor, type,
368 cmd->arg, cmd->data->blocks,
369 current->comm);
370 }
371
372 void met_mmc_complete(struct mmc_host *host, struct mmc_async_req *areq)
373 {
374 struct mmc_blk_data *md;
375 char type;
376
377 if (!check_met_mmc_async_req_legal(host, areq))
378 return;
379
380 md = mmc_get_drvdata(host->card);
381 if (!check_met_mmc_blk_data_legal(md))
382 return;
383
384 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
385 if (type == 'X')
386 return;
387 #ifndef CONFIG_MTK_FPGA
388 MET_FTRACE_PRINTK(met_mmc_complete, md, areq, type);
389 #endif
390 }
391
392 void met_mmc_dma_unmap_start(struct mmc_host *host, struct mmc_async_req *areq)
393 {
394 struct mmc_blk_data *md;
395 char type;
396
397 if (!check_met_mmc_async_req_legal(host, areq))
398 return;
399
400 md = mmc_get_drvdata(host->card);
401 if (!check_met_mmc_blk_data_legal(md))
402 return;
403
404 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
405 if (type == 'X')
406 return;
407 #ifndef CONFIG_MTK_FPGA
408 MET_FTRACE_PRINTK(met_mmc_dma_unmap_start, md, areq, type);
409 #endif
410 }
411
412 void met_mmc_dma_unmap_stop(struct mmc_host *host, struct mmc_async_req *areq)
413 {
414 struct mmc_blk_data *md;
415 char type;
416
417 if (!check_met_mmc_async_req_legal(host, areq))
418 return;
419
420 md = mmc_get_drvdata(host->card);
421 if (!check_met_mmc_blk_data_legal(md))
422 return;
423
424 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
425 if (type == 'X')
426 return;
427 #ifndef CONFIG_MTK_FPGA
428 MET_FTRACE_PRINTK(met_mmc_dma_unmap_stop, md, areq, type);
429 #endif
430 }
431
432 void met_mmc_continue_req_end(struct mmc_host *host, struct mmc_async_req *areq)
433 {
434 struct mmc_blk_data *md;
435 char type;
436
437 if (!check_met_mmc_async_req_legal(host, areq))
438 return;
439
440 md = mmc_get_drvdata(host->card);
441 if (!check_met_mmc_blk_data_legal(md))
442 return;
443
444 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
445 if (type == 'X')
446 return;
447 #ifndef CONFIG_MTK_FPGA
448 MET_FTRACE_PRINTK(met_mmc_continue_req_end, md, areq, type);
449 #endif
450 }
451
452 void met_mmc_dma_stop(struct mmc_host *host, struct mmc_async_req *areq, unsigned int bd_num)
453 {
454 struct mmc_blk_data *md;
455 char type;
456
457 if (!check_met_mmc_async_req_legal(host, areq))
458 return;
459
460 md = mmc_get_drvdata(host->card);
461 if (!check_met_mmc_blk_data_legal(md))
462 return;
463
464 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
465 if (type == 'X')
466 return;
467 #ifndef CONFIG_MTK_FPGA
468 MET_FTRACE_PRINTK(met_mmc_dma_stop, md, areq, type, bd_num);
469 #endif
470 }
493
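/* Reset the packed-command bookkeeping of a queue request. */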
494 static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
495 {
496 struct mmc_packed *packed = mqrq->packed;
497
498 BUG_ON(!packed);
499
500 mqrq->cmd_type = MMC_PACKED_NONE;
501 packed->nr_entries = MMC_PACKED_NR_ZERO;
502 packed->idx_failure = MMC_PACKED_NR_IDX;
503 packed->retries = 0;
504 packed->blocks = 0;
505 }
506
507 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
508 {
509 struct mmc_blk_data *md;
510
511 mutex_lock(&open_lock);
512 md = disk->private_data;
513 if (md && md->usage == 0)
514 md = NULL;
515 if (md)
516 md->usage++;
517 mutex_unlock(&open_lock);
518
519 return md;
520 }
521
522 static inline int mmc_get_devidx(struct gendisk *disk)
523 {
524 int devidx = disk->first_minor / perdev_minors;
525 return devidx;
526 }
527
528 static void mmc_blk_put(struct mmc_blk_data *md)
529 {
530 mutex_lock(&open_lock);
531 md->usage--;
532 if (md->usage == 0) {
533 int devidx = mmc_get_devidx(md->disk);
534 blk_cleanup_queue(md->queue.queue);
535
536 __clear_bit(devidx, dev_use);
537
538 put_disk(md->disk);
539 kfree(md);
540 }
541 mutex_unlock(&open_lock);
542 }
543
544 static ssize_t power_ro_lock_show(struct device *dev,
545 struct device_attribute *attr, char *buf)
546 {
547 int ret;
548 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
549 struct mmc_card *card = md->queue.card;
550 int locked = 0;
551
552 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
553 locked = 2;
554 else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
555 locked = 1;
556
557 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
558
559 return ret;
560 }
561
562 static ssize_t power_ro_lock_store(struct device *dev,
563 struct device_attribute *attr, const char *buf, size_t count)
564 {
565 int ret;
566 struct mmc_blk_data *md, *part_md;
567 struct mmc_card *card;
568 unsigned long set;
569
570 if (kstrtoul(buf, 0, &set))
571 return -EINVAL;
572
573 if (set != 1)
574 return count;
575
576 md = mmc_blk_get(dev_to_disk(dev));
577 card = md->queue.card;
578
579 mmc_claim_host(card->host);
580
581 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
582 card->ext_csd.boot_ro_lock |
583 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
584 card->ext_csd.part_time);
585 if (ret)
586 pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
587 else
588 card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
589
590 mmc_release_host(card->host);
591
592 if (!ret) {
593 pr_info("%s: Locking boot partition ro until next power on\n",
594 md->disk->disk_name);
595 set_disk_ro(md->disk, 1);
596
597 list_for_each_entry(part_md, &md->part, part)
598 if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
599 pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
600 set_disk_ro(part_md->disk, 1);
601 }
602 }
603
604 mmc_blk_put(md);
605 return count;
606 }
607
608 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
609 char *buf)
610 {
611 int ret;
612 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
613
614 ret = snprintf(buf, PAGE_SIZE, "%d\n",
615 get_disk_ro(dev_to_disk(dev)) ^
616 md->read_only);
617 mmc_blk_put(md);
618 return ret;
619 }
620
621 static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
622 const char *buf, size_t count)
623 {
624 int ret;
625 char *end;
626 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
627 unsigned long set = simple_strtoul(buf, &end, 0);
628 if (end == buf) {
629 ret = -EINVAL;
630 goto out;
631 }
632
633 set_disk_ro(dev_to_disk(dev), set || md->read_only);
634 ret = count;
635 out:
636 mmc_blk_put(md);
637 return ret;
638 }
639
640 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
641 {
642 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
643 int ret = -ENXIO;
644
645 mutex_lock(&block_mutex);
646 if (md) {
647 if (md->usage == 2)
648 check_disk_change(bdev);
649 ret = 0;
650
651 if ((mode & FMODE_WRITE) && md->read_only) {
652 mmc_blk_put(md);
653 ret = -EROFS;
654 }
655 }
656 mutex_unlock(&block_mutex);
657
658 return ret;
659 }
660
661 static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
662 {
663 struct mmc_blk_data *md = disk->private_data;
664
665 mutex_lock(&block_mutex);
666 mmc_blk_put(md);
667 mutex_unlock(&block_mutex);
668 }
669
670 static int
671 mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
672 {
673 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
674 geo->heads = 4;
675 geo->sectors = 16;
676 return 0;
677 }
678
679 struct mmc_blk_ioc_data {
680 struct mmc_ioc_cmd ic;
681 unsigned char *buf;
682 u64 buf_bytes;
683 };
684
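/*
 * Copy an mmc_ioc_cmd and its data buffer from user space into a
 * freshly allocated mmc_blk_ioc_data. Returns an ERR_PTR on failure.
 */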
685 static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
686 struct mmc_ioc_cmd __user *user)
687 {
688 struct mmc_blk_ioc_data *idata;
689 int err;
690
691 idata = kzalloc(sizeof(*idata), GFP_KERNEL);
692 if (!idata) {
693 err = -ENOMEM;
694 goto out;
695 }
696
697 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
698 err = -EFAULT;
699 goto idata_err;
700 }
701
702 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
703 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
704 err = -EOVERFLOW;
705 goto idata_err;
706 }
707
708 if (!idata->buf_bytes)
709 return idata;
710
711 idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
712 if (!idata->buf) {
713 err = -ENOMEM;
714 goto idata_err;
715 }
716
717 if (copy_from_user(idata->buf, (void __user *)(unsigned long)
718 idata->ic.data_ptr, idata->buf_bytes)) {
719 err = -EFAULT;
720 goto copy_err;
721 }
722
723 return idata;
724
725 copy_err:
726 kfree(idata->buf);
727 idata_err:
728 kfree(idata);
729 out:
730 return ERR_PTR(err);
731 }
732
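/*
 * Poll CMD13 (SEND_STATUS) until the card leaves the programming
 * state, or until retries_max attempts have been made. Used to make
 * sure an RPMB operation has actually completed.
 */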
733 static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
734 u32 retries_max)
735 {
736 int err;
737 u32 retry_count = 0;
738
739 if (!status || !retries_max)
740 return -EINVAL;
741
742 do {
743 err = get_card_status(card, status, 5);
744 if (err)
745 break;
746
747 if (!R1_STATUS(*status) &&
748 (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
749 break; /* RPMB programming operation complete */
750
751 /*
752 * Reschedule to give the MMC device a chance to continue
753 * processing the previous command without being polled too
754 * frequently.
755 */
756 usleep_range(1000, 5000);
757 } while (++retry_count < retries_max);
758
759 if (retry_count == retries_max)
760 err = -EPERM;
761
762 return err;
763 }
764
765 static int mmc_blk_ioctl_cmd(struct block_device *bdev,
766 struct mmc_ioc_cmd __user *ic_ptr)
767 {
768 struct mmc_blk_ioc_data *idata;
769 struct mmc_blk_data *md;
770 struct mmc_card *card;
771 struct mmc_command cmd = {0};
772 struct mmc_data data = {0};
773 struct mmc_request mrq = {NULL};
774 struct scatterlist sg;
775 int err;
776 int is_rpmb = false;
777 u32 status = 0;
778
779 /*
780 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
781 * whole block device, not on a partition. This prevents overspray
782 * between sibling partitions.
783 */
784 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
785 return -EPERM;
786
787 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
788 if (IS_ERR(idata))
789 return PTR_ERR(idata);
790
791 md = mmc_blk_get(bdev->bd_disk);
792 if (!md) {
793 err = -EINVAL;
794 goto cmd_err;
795 }
796
797 if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
798 is_rpmb = true;
799
800 card = md->queue.card;
801 if (IS_ERR(card)) {
802 err = PTR_ERR(card);
803 goto cmd_done;
804 }
805
806 cmd.opcode = idata->ic.opcode;
807 cmd.arg = idata->ic.arg;
808 cmd.flags = idata->ic.flags;
809
810 if (idata->buf_bytes) {
811 data.sg = &sg;
812 data.sg_len = 1;
813 data.blksz = idata->ic.blksz;
814 data.blocks = idata->ic.blocks;
815
816 sg_init_one(data.sg, idata->buf, idata->buf_bytes);
817
818 if (idata->ic.write_flag)
819 data.flags = MMC_DATA_WRITE;
820 else
821 data.flags = MMC_DATA_READ;
822
823 /* data.flags must already be set before doing this. */
824 mmc_set_data_timeout(&data, card);
825
826 /* Allow overriding the timeout_ns for empirical tuning. */
827 if (idata->ic.data_timeout_ns)
828 data.timeout_ns = idata->ic.data_timeout_ns;
829
830 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
831 /*
832 * Pretend this is a data transfer and rely on the
833 * host driver to compute timeout. When all host
834 * drivers support cmd.cmd_timeout for R1B, this
835 * can be changed to:
836 *
837 * mrq.data = NULL;
838 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
839 */
840 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
841 }
842
843 mrq.data = &data;
844 }
845
846 mrq.cmd = &cmd;
847
848 mmc_claim_host(card->host);
849
850 err = mmc_blk_part_switch(card, md);
851 if (err)
852 goto cmd_rel_host;
853
854 if (idata->ic.is_acmd) {
855 err = mmc_app_cmd(card->host, card);
856 if (err)
857 goto cmd_rel_host;
858 }
859
860 if (is_rpmb) {
861 err = mmc_set_blockcount(card, data.blocks,
862 idata->ic.write_flag & (1 << 31));
863 if (err)
864 goto cmd_rel_host;
865 }
866
867 mmc_wait_for_req(card->host, &mrq);
868
869 if (cmd.error) {
870 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
871 __func__, cmd.error);
872 err = cmd.error;
873 goto cmd_rel_host;
874 }
875 if (data.error) {
876 dev_err(mmc_dev(card->host), "%s: data error %d\n",
877 __func__, data.error);
878 err = data.error;
879 goto cmd_rel_host;
880 }
881
882 /*
883 * According to the SD specs, some commands require a delay after
884 * issuing the command.
885 */
886 if (idata->ic.postsleep_min_us)
887 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
888
889 if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
890 err = -EFAULT;
891 goto cmd_rel_host;
892 }
893
894 if (!idata->ic.write_flag) {
895 if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
896 idata->buf, idata->buf_bytes)) {
897 err = -EFAULT;
898 goto cmd_rel_host;
899 }
900 }
901
902 if (is_rpmb) {
903 /*
904 * Ensure RPMB command has completed by polling CMD13
905 * "Send Status".
906 */
907 err = ioctl_rpmb_card_status_poll(card, &status, 5);
908 if (err)
909 dev_err(mmc_dev(card->host),
910 "%s: Card Status=0x%08X, error %d\n",
911 __func__, status, err);
912 }
913
914 cmd_rel_host:
915 mmc_release_host(card->host);
916
917 cmd_done:
918 mmc_blk_put(md);
919 cmd_err:
920 kfree(idata->buf);
921 kfree(idata);
922 return err;
923 }
924
925 static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
926 unsigned int cmd, unsigned long arg)
927 {
928 int ret = -EINVAL;
929 if (cmd == MMC_IOC_CMD)
930 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
931 return ret;
932 }
933
934 #ifdef CONFIG_COMPAT
935 static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
936 unsigned int cmd, unsigned long arg)
937 {
938 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
939 }
940 #endif
941
942 static const struct block_device_operations mmc_bdops = {
943 .open = mmc_blk_open,
944 .release = mmc_blk_release,
945 .getgeo = mmc_blk_getgeo,
946 .owner = THIS_MODULE,
947 .ioctl = mmc_blk_ioctl,
948 #ifdef CONFIG_COMPAT
949 .compat_ioctl = mmc_blk_compat_ioctl,
950 #endif
951 };
952
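/*
 * Switch the card to the physical partition (user area, boot, RPMB, ...)
 * that backs @md by updating the access bits of PARTITION_CONFIG in the
 * EXT_CSD. A no-op if that partition is already selected.
 */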
953 static inline int mmc_blk_part_switch(struct mmc_card *card,
954 struct mmc_blk_data *md)
955 {
956 int ret;
957 struct mmc_blk_data *main_md = mmc_get_drvdata(card);
958
959 if (main_md->part_curr == md->part_type)
960 return 0;
961
962 if (mmc_card_mmc(card)) {
963 u8 part_config = card->ext_csd.part_config;
964
965 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
966 part_config |= md->part_type;
967
968 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
969 EXT_CSD_PART_CONFIG, part_config,
970 card->ext_csd.part_time);
971 if (ret)
972 return ret;
973
974 card->ext_csd.part_config = part_config;
975 }
976
977 main_md->part_curr = md->part_type;
978 return 0;
979 }
980
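/*
 * Ask an SD card how many blocks of the last write were actually
 * committed, using ACMD22 (SEND_NUM_WR_BLKS). Returns (u32)-1 on any
 * error.
 */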
981 static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
982 {
983 int err;
984 u32 result;
985 __be32 *blocks;
986
987 struct mmc_request mrq = {NULL};
988 struct mmc_command cmd = {0};
989 struct mmc_data data = {0};
990
991 struct scatterlist sg;
992
993 cmd.opcode = MMC_APP_CMD;
994 cmd.arg = card->rca << 16;
995 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
996
997 err = mmc_wait_for_cmd(card->host, &cmd, 0);
998 if (err)
999 return (u32)-1;
1000 if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
1001 return (u32)-1;
1002
1003 memset(&cmd, 0, sizeof(struct mmc_command));
1004
1005 cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
1006 cmd.arg = 0;
1007 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1008
1009 data.blksz = 4;
1010 data.blocks = 1;
1011 data.flags = MMC_DATA_READ;
1012 data.sg = &sg;
1013 data.sg_len = 1;
1014 mmc_set_data_timeout(&data, card);
1015
1016 mrq.cmd = &cmd;
1017 mrq.data = &data;
1018
1019 blocks = kmalloc(4, GFP_KERNEL);
1020 if (!blocks)
1021 return (u32)-1;
1022
1023 sg_init_one(&sg, blocks, 4);
1024
1025 mmc_wait_for_req(card->host, &mrq);
1026
1027 result = ntohl(*blocks);
1028 kfree(blocks);
1029
1030 if (cmd.error || data.error)
1031 result = (u32)-1;
1032
1033 return result;
1034 }
1035
1036 u32 __mmc_sd_num_wr_blocks(struct mmc_card *card)
1037 {
1038 return mmc_sd_num_wr_blocks(card);
1039 }
1040 EXPORT_SYMBOL(__mmc_sd_num_wr_blocks);
1041
1042 static int send_stop(struct mmc_card *card, u32 *status)
1043 {
1044 struct mmc_command cmd = {0};
1045 int err;
1046
1047 cmd.opcode = MMC_STOP_TRANSMISSION;
1048 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1049 err = mmc_wait_for_cmd(card->host, &cmd, 5);
1050 if (err == 0)
1051 *status = cmd.resp[0];
1052 return err;
1053 }
1054
1055 static int get_card_status(struct mmc_card *card, u32 *status, int retries)
1056 {
1057 struct mmc_command cmd = {0};
1058 int err;
1059
1060 cmd.opcode = MMC_SEND_STATUS;
1061 if (!mmc_host_is_spi(card->host))
1062 cmd.arg = card->rca << 16;
1063 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
1064 err = mmc_wait_for_cmd(card->host, &cmd, retries);
1065 if (err == 0)
1066 *status = cmd.resp[0];
1067 return err;
1068 }
1069
1070 #define ERR_NOMEDIUM 3
1071 #define ERR_RETRY 2
1072 #define ERR_ABORT 1
1073 #define ERR_CONTINUE 0
1074
1075 static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
1076 bool status_valid, u32 status)
1077 {
1078 switch (error) {
1079 case -EILSEQ:
1080 /* response crc error, retry the r/w cmd */
1081 pr_err("%s: %s sending %s command, card status %#x\n",
1082 req->rq_disk->disk_name, "response CRC error",
1083 name, status);
1084 return ERR_RETRY;
1085
1086 case -ETIMEDOUT:
1087 pr_err("%s: %s sending %s command, card status %#x\n",
1088 req->rq_disk->disk_name, "timed out", name, status);
1089
1090 /* If the status cmd initially failed, retry the r/w cmd */
1091 if (!status_valid) {
1092 pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
1093 return ERR_RETRY;
1094 }
1095 /*
1096 * If it was a r/w cmd crc error, or illegal command
1097 * (eg, issued in wrong state) then retry - we should
1098 * have corrected the state problem above.
1099 */
1100 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
1101 pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
1102 return ERR_RETRY;
1103 }
1104
1105 /* Otherwise abort the command */
1106 pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
1107 return ERR_ABORT;
1108
1109 default:
1110 /* We don't understand the error code the driver gave us */
1111 pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
1112 req->rq_disk->disk_name, error, status);
1113 return ERR_ABORT;
1114 }
1115 }
1116
1117 /*
1118 * Initial r/w and stop cmd error recovery.
1119 * We don't know whether the card received the r/w cmd or not, so try to
1120 * restore things back to a sane state. Essentially, we do this as follows:
1121 * - Obtain card status. If the first attempt to obtain card status fails,
1122 * the status word will reflect the failed status cmd, not the failed
1123 * r/w cmd. If we fail to obtain card status, it suggests we can no
1124 * longer communicate with the card.
1125 * - Check the card state. If the card received the cmd but there was a
1126 * transient problem with the response, it might still be in a data transfer
1127 * mode. Try to send it a stop command. If this fails, we can't recover.
1128 * - If the r/w cmd failed due to a response CRC error, it was probably
1129 * transient, so retry the cmd.
1130 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
1131 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
1132 * illegal cmd, retry.
1133 * Otherwise we don't understand what happened, so abort.
1134 */
1135 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
1136 struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
1137 {
1138 bool prev_cmd_status_valid = true;
1139 u32 status, stop_status = 0;
1140 int err, retry;
1141
1142 if (mmc_card_removed(card))
1143 return ERR_NOMEDIUM;
1144
1145 /*
1146 * Try to get card status which indicates both the card state
1147 * and why there was no response. If the first attempt fails,
1148 * we can't be sure the returned status is for the r/w command.
1149 */
1150 for (retry = 2; retry >= 0; retry--) {
1151 err = get_card_status(card, &status, 0);
1152 if (!err)
1153 break;
1154
1155 prev_cmd_status_valid = false;
1156 pr_err("%s: error %d sending status command, %sing\n",
1157 req->rq_disk->disk_name, err, retry ? "retry" : "abort");
1158 }
1159
1160 /* We couldn't get a response from the card. Give up. */
1161 if (err) {
1162 /* Check if the card is removed */
1163 if (mmc_detect_card_removed(card->host))
1164 return ERR_NOMEDIUM;
1165 return ERR_ABORT;
1166 }
1167
1168 /* Flag ECC errors */
1169 if ((status & R1_CARD_ECC_FAILED) ||
1170 (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
1171 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
1172 *ecc_err = 1;
1173
1174 /* Flag General errors */
1175 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1176 if ((status & R1_ERROR) ||
1177 (brq->stop.resp[0] & R1_ERROR)) {
1178 pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
1179 req->rq_disk->disk_name, __func__,
1180 brq->stop.resp[0], status);
1181 *gen_err = 1;
1182 }
1183
1184 /*
1185 * Check the current card state. If it is in some data transfer
1186 * mode, tell it to stop (and hopefully transition back to TRAN.)
1187 */
1188 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
1189 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
1190 err = send_stop(card, &stop_status);
1191 		if (err) {
1192 			get_card_status(card, &status, 0);
1193 			if ((R1_CURRENT_STATE(status) == R1_STATE_TRAN) ||
1194 			    (R1_CURRENT_STATE(status) == R1_STATE_PRG)) {
1195 				err = 0;
1196 				stop_status = 0;
1197 				pr_err("b card status %d\n", status);
1198 			} else {
1199 				pr_err("g card status %d\n", status);
1200 			}
1201 		}
1202 if (err)
1203 pr_err("%s: error %d sending stop command\n",
1204 req->rq_disk->disk_name, err);
1205
1206 /*
1207 * If the stop cmd also timed out, the card is probably
1208 * not present, so abort. Other errors are bad news too.
1209 */
1210 if (err)
1211 return ERR_ABORT;
1212 if (stop_status & R1_CARD_ECC_FAILED)
1213 *ecc_err = 1;
1214 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1215 if (stop_status & R1_ERROR) {
1216 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1217 req->rq_disk->disk_name, __func__,
1218 stop_status);
1219 *gen_err = 1;
1220 }
1221 }
1222
1223 /* Check for set block count errors */
1224 if (brq->sbc.error)
1225 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
1226 prev_cmd_status_valid, status);
1227
1228 /* Check for r/w command errors */
1229 if (brq->cmd.error)
1230 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
1231 prev_cmd_status_valid, status);
1232
1233 /* Data errors */
1234 if (!brq->stop.error)
1235 return ERR_CONTINUE;
1236
1237 /* Now for stop errors. These aren't fatal to the transfer. */
1238 pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
1239 req->rq_disk->disk_name, brq->stop.error,
1240 brq->cmd.resp[0], status);
1241
1242 /*
1243 * Substitute in our own stop status as this will give the error
1244 * state which happened during the execution of the r/w command.
1245 */
1246 if (stop_status) {
1247 brq->stop.resp[0] = stop_status;
1248 brq->stop.error = 0;
1249 }
1250 return ERR_CONTINUE;
1251 }
1252
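/*
 * Try a host-driven hardware reset of the card, at most once per
 * request type (read/write/discard/...) until mmc_blk_reset_success()
 * clears the flag, and switch back to the partition the request was
 * targeting afterwards.
 */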
1253 static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1254 int type)
1255 {
1256 int err;
1257
1258 if (md->reset_done & type)
1259 return -EEXIST;
1260
1261 md->reset_done |= type;
1262 err = mmc_hw_reset(host);
1263 /* Ensure we switch back to the correct partition */
1264 if (err != -EOPNOTSUPP) {
1265 struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
1266 int part_err;
1267
1268 main_md->part_curr = main_md->part_type;
1269 part_err = mmc_blk_part_switch(host->card, md);
1270 if (part_err) {
1271 /*
1272 * We have failed to get back into the correct
1273 * partition, so we need to abort the whole request.
1274 */
1275 return -ENODEV;
1276 }
1277 }
1278 return err;
1279 }
1280
1281 static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1282 {
1283 md->reset_done &= ~type;
1284 }
1285
1286 int mmc_access_rpmb(struct mmc_queue *mq)
1287 {
1288 struct mmc_blk_data *md = mq->data;
1289 /*
1290 * If this is an RPMB partition access, return true
1291 */
1292 if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1293 return true;
1294
1295 return false;
1296 }
1297
1298 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1299 {
1300 struct mmc_blk_data *md = mq->data;
1301 struct mmc_card *card = md->queue.card;
1302 unsigned int from, nr, arg;
1303 int err = 0, type = MMC_BLK_DISCARD;
1304
1305 if (!mmc_can_erase(card)) {
1306 err = -EOPNOTSUPP;
1307 goto out;
1308 }
1309
1310 from = blk_rq_pos(req);
1311 nr = blk_rq_sectors(req);
1312
1313 if (mmc_can_discard(card))
1314 arg = MMC_DISCARD_ARG;
1315 else if (mmc_can_trim(card))
1316 arg = MMC_TRIM_ARG;
1317 else
1318 arg = MMC_ERASE_ARG;
1319 retry:
1320 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1321 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1322 INAND_CMD38_ARG_EXT_CSD,
1323 arg == MMC_TRIM_ARG ?
1324 INAND_CMD38_ARG_TRIM :
1325 INAND_CMD38_ARG_ERASE,
1326 0);
1327 if (err)
1328 goto out;
1329 }
1330 err = mmc_erase(card, from, nr, arg);
1331 out:
1332 if (err == -EIO && !mmc_blk_reset(md, card->host, type))
1333 goto retry;
1334 if (!err)
1335 mmc_blk_reset_success(md, type);
1336 blk_end_request(req, err, blk_rq_bytes(req));
1337
1338 return err ? 0 : 1;
1339 }
1340
1341 static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
1342 struct request *req)
1343 {
1344 struct mmc_blk_data *md = mq->data;
1345 struct mmc_card *card = md->queue.card;
1346 unsigned int from, nr, arg, trim_arg, erase_arg;
1347 int err = 0, type = MMC_BLK_SECDISCARD;
1348
1349 if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
1350 err = -EOPNOTSUPP;
1351 goto out;
1352 }
1353
1354 from = blk_rq_pos(req);
1355 nr = blk_rq_sectors(req);
1356
1357 /* The sanitize operation is supported from eMMC v4.5 onward only */
1358 if (mmc_can_sanitize(card)) {
1359 erase_arg = MMC_ERASE_ARG;
1360 trim_arg = MMC_TRIM_ARG;
1361 } else {
1362 erase_arg = MMC_SECURE_ERASE_ARG;
1363 trim_arg = MMC_SECURE_TRIM1_ARG;
1364 }
1365
1366 if (mmc_erase_group_aligned(card, from, nr))
1367 arg = erase_arg;
1368 else if (mmc_can_trim(card))
1369 arg = trim_arg;
1370 else {
1371 err = -EINVAL;
1372 goto out;
1373 }
1374 retry:
1375 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1376 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1377 INAND_CMD38_ARG_EXT_CSD,
1378 arg == MMC_SECURE_TRIM1_ARG ?
1379 INAND_CMD38_ARG_SECTRIM1 :
1380 INAND_CMD38_ARG_SECERASE,
1381 0);
1382 if (err)
1383 goto out_retry;
1384 }
1385
1386 err = mmc_erase(card, from, nr, arg);
1387 if (err == -EIO)
1388 goto out_retry;
1389 if (err)
1390 goto out;
1391
1392 if (arg == MMC_SECURE_TRIM1_ARG) {
1393 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1394 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1395 INAND_CMD38_ARG_EXT_CSD,
1396 INAND_CMD38_ARG_SECTRIM2,
1397 0);
1398 if (err)
1399 goto out_retry;
1400 }
1401
1402 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
1403 if (err == -EIO)
1404 goto out_retry;
1405 if (err)
1406 goto out;
1407 }
1408
1409 if (mmc_can_sanitize(card)) {
1410 trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
1411 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1412 EXT_CSD_SANITIZE_START, 1, 0);
1413 trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
1414 }
1415 out_retry:
1416 if (err && !mmc_blk_reset(md, card->host, type))
1417 goto retry;
1418 if (!err)
1419 mmc_blk_reset_success(md, type);
1420 out:
1421 blk_end_request(req, err, blk_rq_bytes(req));
1422
1423 return err ? 0 : 1;
1424 }
1425
1426 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
1427 {
1428 struct mmc_blk_data *md = mq->data;
1429 struct mmc_card *card = md->queue.card;
1430 int ret = 0;
1431
1432 ret = mmc_flush_cache(card);
1433 if (ret)
1434 ret = -EIO;
1435
1436 blk_end_request_all(req, ret);
1437
1438 return ret ? 0 : 1;
1439 }
1440
1441 /*
1442 * Reformat current write as a reliable write, supporting
1443 * both legacy and the enhanced reliable write MMC cards.
1444 * In each transfer we'll handle only as much as a single
1445 * reliable write can handle, thus finish the request in
1446 * partial completions.
1447 */
1448 static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
1449 struct mmc_card *card,
1450 struct request *req)
1451 {
1452 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
1453 /* Legacy mode imposes restrictions on transfers. */
1454 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
1455 brq->data.blocks = 1;
1456
1457 if (brq->data.blocks > card->ext_csd.rel_sectors)
1458 brq->data.blocks = card->ext_csd.rel_sectors;
1459 else if (brq->data.blocks < card->ext_csd.rel_sectors)
1460 brq->data.blocks = 1;
1461 }
1462 }
1463
1464 #define CMD_ERRORS \
1465 (R1_OUT_OF_RANGE | /* Command argument out of range */ \
1466 R1_ADDRESS_ERROR | /* Misaligned address */ \
1467 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
1468 R1_WP_VIOLATION | /* Tried to write to protected block */ \
1469 R1_CC_ERROR | /* Card controller error */ \
1470 R1_ERROR) /* General/unknown error */
1471
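/*
 * Post-process a completed asynchronous request: run command error
 * recovery if any command failed, check the R1 status bits, and for
 * writes wait for the card to leave the programming state. Returns
 * one of the MMC_BLK_* result codes.
 */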
1472 static int mmc_blk_err_check(struct mmc_card *card,
1473 struct mmc_async_req *areq)
1474 {
1475 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
1476 mmc_active);
1477 struct mmc_blk_request *brq = &mq_mrq->brq;
1478 struct request *req = mq_mrq->req;
1479 int ecc_err = 0, gen_err = 0;
1480
1481 /*
1482 * sbc.error indicates a problem with the set block count
1483 * command. No data will have been transferred.
1484 *
1485 * cmd.error indicates a problem with the r/w command. No
1486 * data will have been transferred.
1487 *
1488 * stop.error indicates a problem with the stop command. Data
1489 * may have been transferred, or may still be transferring.
1490 */
1491 if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1492 brq->data.error) {
1493 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
1494 case ERR_RETRY:
1495 return MMC_BLK_RETRY;
1496 case ERR_ABORT:
1497 return MMC_BLK_ABORT;
1498 case ERR_NOMEDIUM:
1499 return MMC_BLK_NOMEDIUM;
1500 case ERR_CONTINUE:
1501 break;
1502 }
1503 }
1504
1505 /*
1506 * Check for errors relating to the execution of the
1507 * initial command - such as address errors. No data
1508 * has been transferred.
1509 */
1510 if (brq->cmd.resp[0] & CMD_ERRORS) {
1511 pr_err("%s: r/w command failed, status = %#x\n",
1512 req->rq_disk->disk_name, brq->cmd.resp[0]);
1513 return MMC_BLK_ABORT;
1514 }
1515
1516 /*
1517 * Everything else is either success, or a data error of some
1518 * kind. If it was a write, we may have transitioned to
1519 * program mode, and we have to wait for that to complete.
1520 */
1521 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1522 u32 status;
1523 unsigned long timeout;
1524
1525 /* Check stop command response */
1526 if (brq->stop.resp[0] & R1_ERROR) {
1527 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1528 req->rq_disk->disk_name, __func__,
1529 brq->stop.resp[0]);
1530 gen_err = 1;
1531 }
1532
1533 timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
1534 do {
1535 int err = get_card_status(card, &status, 5);
1536 if (err) {
1537 pr_err("%s: error %d requesting status\n",
1538 req->rq_disk->disk_name, err);
1539 return MMC_BLK_CMD_ERR;
1540 }
1541
1542 if (status & R1_ERROR) {
1543 pr_err("%s: %s: general error sending status command, card status %#x\n",
1544 req->rq_disk->disk_name, __func__,
1545 status);
1546 gen_err = 1;
1547 }
1548
1549 /* Timeout if the device never becomes ready for data
1550 * and never leaves the program state.
1551 */
1552 if (time_after(jiffies, timeout)) {
1553 pr_err("%s: Card stuck in programming state!"\
1554 " %s %s\n", mmc_hostname(card->host),
1555 req->rq_disk->disk_name, __func__);
1556
1557 return MMC_BLK_CMD_ERR;
1558 }
1559 /*
1560 * Some cards mishandle the status bits,
1561 * so make sure to check both the busy
1562 * indication and the card state.
1563 */
1564 } while (!(status & R1_READY_FOR_DATA) ||
1565 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
1566 }
1567
1568 /* if general error occurs, retry the write operation. */
1569 if (gen_err) {
1570 pr_warn("%s: retrying write for general error\n",
1571 req->rq_disk->disk_name);
1572 return MMC_BLK_RETRY;
1573 }
1574
1575 if (brq->data.error) {
1576 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1577 req->rq_disk->disk_name, brq->data.error,
1578 (unsigned)blk_rq_pos(req),
1579 (unsigned)blk_rq_sectors(req),
1580 brq->cmd.resp[0], brq->stop.resp[0]);
1581
1582 if (rq_data_dir(req) == READ) {
1583 if (ecc_err)
1584 return MMC_BLK_ECC_ERR;
1585 return MMC_BLK_DATA_ERR;
1586 } else {
1587 return MMC_BLK_CMD_ERR;
1588 }
1589 }
1590
1591 if (!brq->data.bytes_xfered)
1592 return MMC_BLK_RETRY;
1593
1594 if (mmc_packed_cmd(mq_mrq->cmd_type)) {
1595 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
1596 return MMC_BLK_PARTIAL;
1597 else
1598 return MMC_BLK_SUCCESS;
1599 }
1600
1601 if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1602 return MMC_BLK_PARTIAL;
1603
1604 return MMC_BLK_SUCCESS;
1605 }
1606
1607 static int mmc_blk_packed_err_check(struct mmc_card *card,
1608 struct mmc_async_req *areq)
1609 {
1610 struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
1611 mmc_active);
1612 struct request *req = mq_rq->req;
1613 struct mmc_packed *packed = mq_rq->packed;
1614 int err, check, status;
1615 u8 *ext_csd;
1616
1617 BUG_ON(!packed);
1618
1619 packed->retries--;
1620 check = mmc_blk_err_check(card, areq);
1621 err = get_card_status(card, &status, 0);
1622 if (err) {
1623 pr_err("%s: error %d sending status command\n",
1624 req->rq_disk->disk_name, err);
1625 return MMC_BLK_ABORT;
1626 }
1627
1628 if (status & R1_EXCEPTION_EVENT) {
1629 ext_csd = kzalloc(512, GFP_KERNEL);
1630 if (!ext_csd) {
1631 pr_err("%s: unable to allocate buffer for ext_csd\n",
1632 req->rq_disk->disk_name);
1633 return -ENOMEM;
1634 }
1635
1636 err = mmc_send_ext_csd(card, ext_csd);
1637 if (err) {
1638 pr_err("%s: error %d sending ext_csd\n",
1639 req->rq_disk->disk_name, err);
1640 check = MMC_BLK_ABORT;
1641 goto free;
1642 }
1643
1644 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
1645 EXT_CSD_PACKED_FAILURE) &&
1646 (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1647 EXT_CSD_PACKED_GENERIC_ERROR)) {
1648 if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1649 EXT_CSD_PACKED_INDEXED_ERROR) {
1650 packed->idx_failure =
1651 ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
1652 check = MMC_BLK_PARTIAL;
1653 }
1654 pr_err("%s: packed cmd failed, nr %u, sectors %u, "
1655 "failure index: %d\n",
1656 req->rq_disk->disk_name, packed->nr_entries,
1657 packed->blocks, packed->idx_failure);
1658 }
1659 free:
1660 kfree(ext_csd);
1661 }
1662
1663 return check;
1664 }
1665
1666 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1667 struct mmc_card *card,
1668 int disable_multi,
1669 struct mmc_queue *mq)
1670 {
1671 u32 readcmd, writecmd;
1672 struct mmc_blk_request *brq = &mqrq->brq;
1673 struct request *req = mqrq->req;
1674 struct mmc_blk_data *md = mq->data;
1675 bool do_data_tag;
1676
1677 /*
1678 * Reliable writes are used to implement Forced Unit Access and
1679 * REQ_META accesses, and are supported only on MMCs.
1680 *
1681 * XXX: this really needs a good explanation of why REQ_META
1682 * is treated special.
1683 */
1684 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
1685 (req->cmd_flags & REQ_META)) &&
1686 (rq_data_dir(req) == WRITE) &&
1687 (md->flags & MMC_BLK_REL_WR);
1688
1689 memset(brq, 0, sizeof(struct mmc_blk_request));
1690 brq->mrq.cmd = &brq->cmd;
1691 brq->mrq.data = &brq->data;
1692
1693 brq->cmd.arg = blk_rq_pos(req);
1694 if (!mmc_card_blockaddr(card))
1695 brq->cmd.arg <<= 9;
1696 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1697 brq->data.blksz = 512;
1698 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1699 brq->stop.arg = 0;
1700 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1701 brq->data.blocks = blk_rq_sectors(req);
1702
1703 /*
1704 * The block layer doesn't support all sector count
1705 * restrictions, so we need to be prepared for too big
1706 * requests.
1707 */
1708 if (brq->data.blocks > card->host->max_blk_count)
1709 brq->data.blocks = card->host->max_blk_count;
1710
1711 if (brq->data.blocks > 1) {
1712 /*
1713 * After a read error, we redo the request one sector
1714 * at a time in order to accurately determine which
1715 * sectors can be read successfully.
1716 */
1717 if (disable_multi)
1718 brq->data.blocks = 1;
1719
1720 /* Some controllers can't do multiblock reads due to hw bugs */
1721 if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
1722 rq_data_dir(req) == READ)
1723 brq->data.blocks = 1;
1724 }
1725
1726 if (brq->data.blocks > 1 || do_rel_wr) {
1727 /* SPI multiblock writes terminate using a special
1728 * token, not a STOP_TRANSMISSION request.
1729 */
1730 if (!mmc_host_is_spi(card->host) ||
1731 rq_data_dir(req) == READ)
1732 brq->mrq.stop = &brq->stop;
1733 readcmd = MMC_READ_MULTIPLE_BLOCK;
1734 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1735 } else {
1736 brq->mrq.stop = NULL;
1737 readcmd = MMC_READ_SINGLE_BLOCK;
1738 writecmd = MMC_WRITE_BLOCK;
1739 }
1740 #ifdef CONFIG_MTK_EMMC_CACHE
1741 /* For non-cacheable system data, the implementation of
1742 * reliable write / force programming write must use the
1743 * multi-block write command.
1744 */
1745 if (mmc_card_mmc(card) && (card->ext_csd.cache_ctrl & 0x1)){
1746 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1747 }
1748 #endif
1749 if (rq_data_dir(req) == READ) {
1750 brq->cmd.opcode = readcmd;
1751 brq->data.flags |= MMC_DATA_READ;
1752 } else {
1753 brq->cmd.opcode = writecmd;
1754 brq->data.flags |= MMC_DATA_WRITE;
1755 }
1756
1757 if (do_rel_wr)
1758 mmc_apply_rel_rw(brq, card, req);
1759
1760 /*
1761 * Data tag is used only during writing meta data to speed
1762 * up write and any subsequent read of this meta data
1763 */
1764 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1765 (req->cmd_flags & REQ_META) &&
1766 (rq_data_dir(req) == WRITE) &&
1767 ((brq->data.blocks * brq->data.blksz) >=
1768 card->ext_csd.data_tag_unit_size);
1769
1770 /*
1771 * Pre-defined multi-block transfers are preferable to
1772 * open ended-ones (and necessary for reliable writes).
1773 * However, it is not sufficient to just send CMD23,
1774 * and avoid the final CMD12, as on an error condition
1775 * CMD12 (stop) needs to be sent anyway. This, coupled
1776 * with Auto-CMD23 enhancements provided by some
1777 * hosts, means that the complexity of dealing
1778 * with this is best left to the host. If CMD23 is
1779 * supported by card and host, we'll fill sbc in and let
1780 * the host deal with handling it correctly. This means
1781 * that for hosts that don't expose MMC_CAP_CMD23, no
1782 * change of behavior will be observed.
1783 *
1784 * N.B: Some MMC cards experience perf degradation.
1785 * We'll avoid using CMD23-bounded multiblock writes for
1786 * these, while retaining features like reliable writes.
1787 */
1788 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1789 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1790 do_data_tag)) {
1791 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1792 brq->sbc.arg = brq->data.blocks |
1793 (do_rel_wr ? (1 << 31) : 0) |
1794 (do_data_tag ? (1 << 29) : 0);
1795 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1796 brq->mrq.sbc = &brq->sbc;
1797 }
1798
1799 mmc_set_data_timeout(&brq->data, card);
1800
1801 brq->data.sg = mqrq->sg;
1802 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1803
1804 if (brq->data.sg_len > 1024)
1805 pr_err("%s:%d sglen = %x\n", __func__, __LINE__, brq->data.sg_len);
1806
1807 /*
1808 * Adjust the sg list so it is the same size as the
1809 * request.
1810 */
1811 if (brq->data.blocks != blk_rq_sectors(req)) {
1812 int i, data_size = brq->data.blocks << 9;
1813 struct scatterlist *sg;
1814
1815 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1816 data_size -= sg->length;
1817 if (data_size <= 0) {
1818 sg->length += data_size;
1819 i++;
1820 break;
1821 }
1822 }
1823 brq->data.sg_len = i;
1824 pr_err("%s:%d sglen = %x\n", __func__, __LINE__, brq->data.sg_len);
1825 }
1826
1827 mqrq->mmc_active.mrq = &brq->mrq;
1828 mqrq->mmc_active.err_check = mmc_blk_err_check;
1829
1830 mmc_queue_bounce_pre(mqrq);
1831 }
1832
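/*
 * Number of scatterlist segments needed to hold the packed command
 * header (512 bytes, or 4 KiB for large-sector cards) given the
 * queue's maximum segment size.
 */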
1833 static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
1834 struct mmc_card *card)
1835 {
1836 unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
1837 unsigned int max_seg_sz = queue_max_segment_size(q);
1838 unsigned int len, nr_segs = 0;
1839
1840 do {
1841 len = min(hdr_sz, max_seg_sz);
1842 hdr_sz -= len;
1843 nr_segs++;
1844 } while (hdr_sz);
1845
1846 return nr_segs;
1847 }
1848
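/*
 * Try to build a packed write: keep fetching requests from the queue
 * and appending them to mqrq->packed->list while they have the same
 * direction and fit within the sector, segment and max_packed_writes
 * limits. Returns the number of packed entries, or 0 to fall back to
 * a normal transfer.
 */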
1849 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1850 {
1851 struct request_queue *q = mq->queue;
1852 struct mmc_card *card = mq->card;
1853 struct request *cur = req, *next = NULL;
1854 struct mmc_blk_data *md = mq->data;
1855 struct mmc_queue_req *mqrq = mq->mqrq_cur;
1856 bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
1857 unsigned int req_sectors = 0, phys_segments = 0;
1858 unsigned int max_blk_count, max_phys_segs;
1859 bool put_back = true;
1860 u8 max_packed_rw = 0;
1861 u8 reqs = 0;
1862
1863 if (!(md->flags & MMC_BLK_PACKED_CMD))
1864 goto no_packed;
1865
1866 if ((rq_data_dir(cur) == WRITE) &&
1867 mmc_host_packed_wr(card->host))
1868 max_packed_rw = card->ext_csd.max_packed_writes;
1869
1870 if (max_packed_rw == 0)
1871 goto no_packed;
1872
1873 if (mmc_req_rel_wr(cur) &&
1874 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1875 goto no_packed;
1876
1877 if (mmc_large_sector(card) &&
1878 !IS_ALIGNED(blk_rq_sectors(cur), 8))
1879 goto no_packed;
1880
1881 mmc_blk_clear_packed(mqrq);
1882
1883 max_blk_count = min(card->host->max_blk_count,
1884 card->host->max_req_size >> 9);
1885 if (unlikely(max_blk_count > 0xffff))
1886 max_blk_count = 0xffff;
1887
1888 max_phys_segs = queue_max_segments(q);
1889 req_sectors += blk_rq_sectors(cur);
1890 phys_segments += cur->nr_phys_segments;
1891
1892 if (rq_data_dir(cur) == WRITE) {
1893 req_sectors += mmc_large_sector(card) ? 8 : 1;
1894 phys_segments += mmc_calc_packed_hdr_segs(q, card);
1895 }
1896
1897 do {
1898 if (reqs >= max_packed_rw - 1) {
1899 put_back = false;
1900 break;
1901 }
1902
1903 spin_lock_irq(q->queue_lock);
1904 next = blk_fetch_request(q);
1905 spin_unlock_irq(q->queue_lock);
1906 if (!next) {
1907 put_back = false;
1908 break;
1909 }
1910
1911 if (mmc_large_sector(card) &&
1912 !IS_ALIGNED(blk_rq_sectors(next), 8))
1913 break;
1914
1915 if (next->cmd_flags & REQ_DISCARD ||
1916 next->cmd_flags & REQ_FLUSH)
1917 break;
1918
1919 if (rq_data_dir(cur) != rq_data_dir(next))
1920 break;
1921
1922 if (mmc_req_rel_wr(next) &&
1923 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1924 break;
1925
1926 req_sectors += blk_rq_sectors(next);
1927 if (req_sectors > max_blk_count)
1928 break;
1929
1930 phys_segments += next->nr_phys_segments;
1931 if (phys_segments > max_phys_segs)
1932 break;
1933
1934 list_add_tail(&next->queuelist, &mqrq->packed->list);
1935 cur = next;
1936 reqs++;
1937 } while (1);
1938
1939 if (put_back) {
1940 spin_lock_irq(q->queue_lock);
1941 blk_requeue_request(q, next);
1942 spin_unlock_irq(q->queue_lock);
1943 }
1944
1945 if (reqs > 0) {
1946 list_add(&req->queuelist, &mqrq->packed->list);
1947 mqrq->packed->nr_entries = ++reqs;
1948 mqrq->packed->retries = reqs;
1949 return reqs;
1950 }
1951
1952 no_packed:
1953 mqrq->cmd_type = MMC_PACKED_NONE;
1954 return 0;
1955 }
1956
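/*
 * Build the packed WRITE request: fill the packed command header with
 * one CMD23 argument / CMD25 address pair per queued request, then set
 * up the single CMD23 + CMD25 transfer that sends the header plus all
 * data blocks.
 */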
1957 static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1958 struct mmc_card *card,
1959 struct mmc_queue *mq)
1960 {
1961 struct mmc_blk_request *brq = &mqrq->brq;
1962 struct request *req = mqrq->req;
1963 struct request *prq;
1964 struct mmc_blk_data *md = mq->data;
1965 struct mmc_packed *packed = mqrq->packed;
1966 bool do_rel_wr, do_data_tag;
1967 u32 *packed_cmd_hdr;
1968 u8 hdr_blocks;
1969 u8 i = 1;
1970
1971 BUG_ON(!packed);
1972
1973 mqrq->cmd_type = MMC_PACKED_WRITE;
1974 packed->blocks = 0;
1975 packed->idx_failure = MMC_PACKED_NR_IDX;
1976
1977 packed_cmd_hdr = packed->cmd_hdr;
1978 memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
1979 packed_cmd_hdr[0] = (packed->nr_entries << 16) |
1980 (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
1981 hdr_blocks = mmc_large_sector(card) ? 8 : 1;
1982
1983 /*
1984 * Argument for each entry of packed group
1985 */
1986 list_for_each_entry(prq, &packed->list, queuelist) {
1987 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1988 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1989 (prq->cmd_flags & REQ_META) &&
1990 (rq_data_dir(prq) == WRITE) &&
1991 ((brq->data.blocks * brq->data.blksz) >=
1992 card->ext_csd.data_tag_unit_size);
1993 /* Argument of CMD23 */
1994 packed_cmd_hdr[(i * 2)] =
1995 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1996 (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
1997 blk_rq_sectors(prq);
1998 /* Argument of CMD18 or CMD25 */
1999 packed_cmd_hdr[((i * 2)) + 1] =
2000 mmc_card_blockaddr(card) ?
2001 blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
2002 packed->blocks += blk_rq_sectors(prq);
2003 i++;
2004 }
2005
2006 memset(brq, 0, sizeof(struct mmc_blk_request));
2007 brq->mrq.cmd = &brq->cmd;
2008 brq->mrq.data = &brq->data;
2009 brq->mrq.sbc = &brq->sbc;
2010 brq->mrq.stop = &brq->stop;
2011
2012 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
2013 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
2014 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
2015
2016 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
2017 brq->cmd.arg = blk_rq_pos(req);
2018 if (!mmc_card_blockaddr(card))
2019 brq->cmd.arg <<= 9;
2020 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
2021
2022 brq->data.blksz = 512;
2023 brq->data.blocks = packed->blocks + hdr_blocks;
2024 brq->data.flags |= MMC_DATA_WRITE;
2025
2026 brq->stop.opcode = MMC_STOP_TRANSMISSION;
2027 brq->stop.arg = 0;
2028 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2029
2030 mmc_set_data_timeout(&brq->data, card);
2031
2032 brq->data.sg = mqrq->sg;
2033 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
2034 pr_err("%s: sglen = %d\n", __func__, brq->data.sg_len);
2035
2036 mqrq->mmc_active.mrq = &brq->mrq;
2037 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
2038
2039 mmc_queue_bounce_pre(mqrq);
2040 }
2041
2042 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
2043 struct mmc_blk_request *brq, struct request *req,
2044 int ret)
2045 {
2046 struct mmc_queue_req *mq_rq;
2047 mq_rq = container_of(brq, struct mmc_queue_req, brq);
2048
2049 /*
2050 * If this is an SD card and we're writing, we can first
2051 * mark the known good sectors as ok.
2052 *
2053 * If the card is not SD, we can still mark written sectors as ok,
2054 * as reported by the controller (which might be less than
2055 * the real number of written sectors, but never more).
2056 */
2057 if (mmc_card_sd(card)) {
2058 u32 blocks;
2059
2060 blocks = mmc_sd_num_wr_blocks(card);
2061 if (blocks != (u32)-1) {
2062 ret = blk_end_request(req, 0, blocks << 9);
2063 }
2064 } else {
2065 if (!mmc_packed_cmd(mq_rq->cmd_type))
2066 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
2067 }
2068 return ret;
2069 }
2070
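/*
 * Complete the requests of a packed group that transferred successfully.
 * If a failing index was recorded, completion stops there and the failed
 * request is handed back for retry; returns non-zero in that case.
 */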
2071 static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
2072 {
2073 struct request *prq;
2074 struct mmc_packed *packed = mq_rq->packed;
2075 int idx = packed->idx_failure, i = 0;
2076 int ret = 0;
2077
2078 BUG_ON(!packed);
2079
2080 while (!list_empty(&packed->list)) {
2081 prq = list_entry_rq(packed->list.next);
2082 if (idx == i) {
2083 /* retry from error index */
2084 packed->nr_entries -= idx;
2085 mq_rq->req = prq;
2086 ret = 1;
2087
2088 if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
2089 list_del_init(&prq->queuelist);
2090 mmc_blk_clear_packed(mq_rq);
2091 }
2092 return ret;
2093 }
2094 list_del_init(&prq->queuelist);
2095 blk_end_request(prq, 0, blk_rq_bytes(prq));
2096 i++;
2097 }
2098
2099 mmc_blk_clear_packed(mq_rq);
2100 return ret;
2101 }
2102
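/* Fail every request still on the packed list with -EIO. */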
2103 static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
2104 {
2105 struct request *prq;
2106 struct mmc_packed *packed = mq_rq->packed;
2107
2108 BUG_ON(!packed);
2109
2110 while (!list_empty(&packed->list)) {
2111 prq = list_entry_rq(packed->list.next);
2112 list_del_init(&prq->queuelist);
2113 blk_end_request(prq, -EIO, blk_rq_bytes(prq));
2114 }
2115
2116 mmc_blk_clear_packed(mq_rq);
2117 }
2118
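/*
 * Put the packed requests back on the block layer queue. The first entry
 * (the original request) is only unlinked here; the caller re-prepares
 * and reissues it itself.
 */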
2119 static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
2120 struct mmc_queue_req *mq_rq)
2121 {
2122 struct request *prq;
2123 struct request_queue *q = mq->queue;
2124 struct mmc_packed *packed = mq_rq->packed;
2125
2126 BUG_ON(!packed);
2127
2128 while (!list_empty(&packed->list)) {
2129 prq = list_entry_rq(packed->list.prev);
2130 if (prq->queuelist.prev != &packed->list) {
2131 list_del_init(&prq->queuelist);
2132 spin_lock_irq(q->queue_lock);
2133 blk_requeue_request(mq->queue, prq);
2134 spin_unlock_irq(q->queue_lock);
2135 } else {
2136 list_del_init(&prq->queuelist);
2137 }
2138 }
2139
2140 mmc_blk_clear_packed(mq_rq);
2141 }
2142 #if defined(FEATURE_STORAGE_PERF_INDEX)
2143 #define PRT_TIME_PERIOD 500000000
2144 #define UP_LIMITS_4BYTE 4294967295UL //((4*1024*1024*1024)-1)
2145 #define ID_CNT 10
2146 pid_t mmcqd[ID_CNT]={0};
2147 bool start_async_req[ID_CNT] = {0};
2148 unsigned long long start_async_req_time[ID_CNT] = {0};
2149 static unsigned long long mmcqd_tag_t1[ID_CNT]={0}, mmccid_tag_t1=0;
2150 unsigned long long mmcqd_t_usage_wr[ID_CNT]={0}, mmcqd_t_usage_rd[ID_CNT]={0};
2151 unsigned int mmcqd_rq_size_wr[ID_CNT]={0}, mmcqd_rq_size_rd[ID_CNT]={0};
2152 static unsigned int mmcqd_wr_offset_tag[ID_CNT]={0}, mmcqd_rd_offset_tag[ID_CNT]={0}, mmcqd_wr_offset[ID_CNT]={0}, mmcqd_rd_offset[ID_CNT]={0};
2153 static unsigned int mmcqd_wr_bit[ID_CNT]={0},mmcqd_wr_tract[ID_CNT]={0};
2154 static unsigned int mmcqd_rd_bit[ID_CNT]={0},mmcqd_rd_tract[ID_CNT]={0};
2155 static unsigned int mmcqd_wr_break[ID_CNT]={0}, mmcqd_rd_break[ID_CNT]={0};
2156 unsigned int mmcqd_rq_count[ID_CNT]={0}, mmcqd_wr_rq_count[ID_CNT]={0}, mmcqd_rd_rq_count[ID_CNT]={0};
2157 extern u32 g_u32_cid[4];
2158 #ifdef FEATURE_STORAGE_META_LOG
2159 int check_perdev_minors = CONFIG_MMC_BLOCK_MINORS;
2160 struct metadata_rwlogger metadata_logger[10] = {{{0}}};
2161 #endif
2162
2163 unsigned int mmcqd_work_percent[ID_CNT]={0};
2164 unsigned int mmcqd_w_throughput[ID_CNT]={0};
2165 unsigned int mmcqd_r_throughput[ID_CNT]={0};
2166 unsigned int mmcqd_read_clear[ID_CNT]={0};
2167
2168 static void g_var_clear(unsigned int idx)
2169 {
2170 mmcqd_t_usage_wr[idx] = 0;
2171 mmcqd_t_usage_rd[idx] = 0;
2172 mmcqd_rq_size_wr[idx] = 0;
2173 mmcqd_rq_size_rd[idx] = 0;
2174 mmcqd_rq_count[idx] = 0;
2175 mmcqd_wr_offset[idx] = 0;
2176 mmcqd_rd_offset[idx] = 0;
2177 mmcqd_wr_break[idx] = 0;
2178 mmcqd_rd_break[idx] = 0;
2179 mmcqd_wr_tract[idx] = 0;
2180 mmcqd_wr_bit[idx] = 0;
2181 mmcqd_rd_tract[idx] = 0;
2182 mmcqd_rd_bit[idx] = 0;
2183 mmcqd_wr_rq_count[idx] = 0;
2184 mmcqd_rd_rq_count[idx] = 0;
2185 }
2186
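/*
 * Map the calling mmcqd thread's PID to a slot in the per-thread
 * statistics arrays above, claiming a free slot on first use (the last
 * slot is reused if all ID_CNT slots are already taken).
 */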
2187 unsigned int find_mmcqd_index(void)
2188 {
2189 pid_t mmcqd_pid = 0;
2190 unsigned int idx = 0;
2191 unsigned char i = 0;
2192
2193 mmcqd_pid = task_pid_nr(current);
2194
2195 if (mmcqd[0] == 0) {
2196 mmcqd[0] = mmcqd_pid;
2197 start_async_req[0] = 0;
2198 }
2199
2200 for (i = 0; i < ID_CNT; i++)
2201 {
2202 if (mmcqd_pid == mmcqd[i])
2203 {
2204 idx = i;
2205 break;
2206 }
2207 if ((mmcqd[i] == 0) || (i == ID_CNT - 1))
2208 {
2209 mmcqd[i] = mmcqd_pid;
2210 start_async_req[i] = 0;
2211 idx = i;
2212 break;
2213 }
2214 }
2215 return idx;
2216 }
2217
2218 #endif
2219 //#undef FEATURE_STORAGE_PID_LOGGER
2220 #if defined(FEATURE_STORAGE_PID_LOGGER)
2221
2222 struct struct_pid_logger g_pid_logger[PID_ID_CNT]={{0,0,{0},{0},{0},{0}}};
2223
2224
2225
2226 unsigned char *page_logger = NULL;
2227 spinlock_t g_locker;
2228
2229 #endif
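/*
 * Issue a read/write request using the asynchronous request mechanism:
 * prepare the new request (packed if enough writes were collected),
 * start it while the previous one completes, then handle the completion
 * status of the previous request, retrying or aborting on errors.
 * The FEATURE_STORAGE_* blocks below only collect per-thread I/O
 * statistics and logging.
 */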
2230 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
2231 {
2232 struct mmc_blk_data *md = mq->data;
2233 struct mmc_card *card = md->queue.card;
2234 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
2235 int ret = 1, disable_multi = 0, retry = 0, type;
2236 enum mmc_blk_status status;
2237 struct mmc_queue_req *mq_rq;
2238 struct request *req = rqc;
2239 struct mmc_async_req *areq;
2240 const u8 packed_nr = 2;
2241 u8 reqs = 0;
2242 unsigned long long time1 = 0;
2243 #if defined(FEATURE_STORAGE_PERF_INDEX)
2244 pid_t mmcqd_pid=0;
2245 unsigned long long t_period=0, t_usage=0;
2246 unsigned int t_percent=0;
2247 unsigned int perf_meter=0;
2248 unsigned int rq_byte=0,rq_sector=0,sect_offset=0;
2249 unsigned int diversity=0;
2250 unsigned int idx=0;
2251 #ifdef FEATURE_STORAGE_META_LOG
2252 unsigned int mmcmetaindex=0;
2253 #endif
2254 #endif
2255 #if defined(FEATURE_STORAGE_PID_LOGGER)
2256 unsigned int index=0;
2257 #endif
2258
2259 if (!rqc && !mq->mqrq_prev->req)
2260 return 0;
2261 time1 = sched_clock();
2262
2263 if (rqc)
2264 reqs = mmc_blk_prep_packed_list(mq, rqc);
2265 #if defined(FEATURE_STORAGE_PERF_INDEX)
2266 mmcqd_pid = task_pid_nr(current);
2267
2268 idx = find_mmcqd_index();
2269
2270 mmcqd_read_clear[idx] = 1;
2271 if(mmccid_tag_t1==0)
2272 mmccid_tag_t1 = time1;
2273 t_period = time1 - mmccid_tag_t1;
2274 if(t_period >= (unsigned long long )((PRT_TIME_PERIOD)*(unsigned long long )10))
2275 {
2276 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "MMC Queue Thread:%d, %d, %d, %d, %d \n", mmcqd[0], mmcqd[1], mmcqd[2], mmcqd[3], mmcqd[4]);
2277 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "MMC CID: %x %x %x %x \n", g_u32_cid[0], g_u32_cid[1], g_u32_cid[2], g_u32_cid[3]);
2278 mmccid_tag_t1 = time1;
2279 }
2280 if(mmcqd_tag_t1[idx]==0)
2281 mmcqd_tag_t1[idx] = time1;
2282 t_period = time1 - mmcqd_tag_t1[idx];
2283
2284 if(t_period >= (unsigned long long )PRT_TIME_PERIOD)
2285 {
2286 mmcqd_read_clear[idx] = 2;
2287 mmcqd_work_percent[idx] = 1;
2288 mmcqd_r_throughput[idx] = 0;
2289 mmcqd_w_throughput[idx] = 0;
2290 t_usage = mmcqd_t_usage_wr [idx] + mmcqd_t_usage_rd[idx];
2291 if(t_period > t_usage*100)
2292 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Workload < 1%%, duty %lld, period %lld, req_cnt=%d \n", mmcqd[idx], t_usage, t_period, mmcqd_rq_count[idx]);
2293 else
2294 {
2295 do_div(t_period, 100); /* do_div for the 64-bit divide; t_period now holds period/100 */
2296 t_percent =((unsigned int)t_usage)/((unsigned int)t_period);
2297 mmcqd_work_percent[idx] = t_percent;
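/*
 * Example: t_usage = 250,000,000 ns within a 500,000,000 ns window:
 * after the divide t_period = 5,000,000, so
 * t_percent = 250000000 / 5000000 = 50, i.e. a 50% duty cycle.
 */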
2298 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Workload=%d%%, duty %lld, period %lld00, req_cnt=%d \n", mmcqd[idx], t_percent, t_usage, t_period, mmcqd_rq_count[idx]); //period %lld00 == period %lld x100
2299 }
2300 if(mmcqd_wr_rq_count[idx] >= 2)
2301 {
2302 diversity = mmcqd_wr_offset[idx]/(mmcqd_wr_rq_count[idx]-1);
2303 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Write Diversity=%d sectors offset, req_cnt=%d, break_cnt=%d, tract_cnt=%d, bit_cnt=%d\n", mmcqd[idx], diversity, mmcqd_wr_rq_count[idx], mmcqd_wr_break[idx], mmcqd_wr_tract[idx], mmcqd_wr_bit[idx]);
2304 }
2305 if(mmcqd_rd_rq_count[idx] >= 2)
2306 {
2307 diversity = mmcqd_rd_offset[idx]/(mmcqd_rd_rq_count[idx]-1);
2308 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Read Diversity=%d sectors offset, req_cnt=%d, break_cnt=%d, tract_cnt=%d, bit_cnt=%d\n", mmcqd[idx], diversity, mmcqd_rd_rq_count[idx], mmcqd_rd_break[idx], mmcqd_rd_tract[idx], mmcqd_rd_bit[idx]);
2309 }
2310 if(mmcqd_t_usage_wr[idx])
2311 {
2312 do_div(mmcqd_t_usage_wr[idx], 1000000); /* ns -> ms; do_div for the 64-bit divide */
2313 if (mmcqd_t_usage_wr[idx]) /* skip the print if the duration is < 1 ms */
2314 {
2315 perf_meter = (mmcqd_rq_size_wr[idx])/((unsigned int)mmcqd_t_usage_wr[idx]); //kb/s
2316 mmcqd_w_throughput[idx] = perf_meter;
2317 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Write Throughput=%d kB/s, size: %d bytes, time:%lld ms\n", mmcqd[idx], perf_meter, mmcqd_rq_size_wr[idx], mmcqd_t_usage_wr[idx]);
2318 }
2319 }
2320 if(mmcqd_t_usage_rd[idx])
2321 {
2322 do_div(mmcqd_t_usage_rd[idx], 1000000); /* ns -> ms; do_div for the 64-bit divide */
2323 if (mmcqd_t_usage_rd[idx]) /* skip the print if the duration is < 1 ms */
2324 {
2325 perf_meter = (mmcqd_rq_size_rd[idx])/((unsigned int)mmcqd_t_usage_rd[idx]); //kb/s
2326 mmcqd_r_throughput[idx] = perf_meter;
2327 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Read Throughput=%d kB/s, size: %d bytes, time:%lld ms\n", mmcqd[idx], perf_meter, mmcqd_rq_size_rd[idx], mmcqd_t_usage_rd[idx]);
2328 }
2329 }
2330 mmcqd_tag_t1[idx]=time1;
2331 g_var_clear(idx);
2332 #ifdef FEATURE_STORAGE_META_LOG
2333 mmcmetaindex = mmc_get_devidx(md->disk);
2334 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd metarw WR:%d NWR:%d HR:%d WDR:%d HDR:%d WW:%d NWW:%d HW:%d\n",
2335 metadata_logger[mmcmetaindex].metadata_rw_logger[0], metadata_logger[mmcmetaindex].metadata_rw_logger[1],
2336 metadata_logger[mmcmetaindex].metadata_rw_logger[2], metadata_logger[mmcmetaindex].metadata_rw_logger[3],
2337 metadata_logger[mmcmetaindex].metadata_rw_logger[4], metadata_logger[mmcmetaindex].metadata_rw_logger[5],
2338 metadata_logger[mmcmetaindex].metadata_rw_logger[6], metadata_logger[mmcmetaindex].metadata_rw_logger[7]);
2339 clear_metadata_rw_status(md->disk->first_minor);
2340 #endif
2341 #if defined(FEATURE_STORAGE_PID_LOGGER)
2342 do {
2343 int i;
2344 for(index=0; index<PID_ID_CNT; index++) {
2345
2346 if( g_pid_logger[index].current_pid!=0 && g_pid_logger[index].current_pid == mmcqd_pid)
2347 break;
2348 }
2349 if( index == PID_ID_CNT )
2350 break;
2351 for( i=0; i<PID_LOGGER_COUNT; i++) {
2352 //printk(KERN_INFO"hank mmcqd %d %d", g_pid_logger[index].pid_logger[i], mmcqd_pid);
2353 if( g_pid_logger[index].pid_logger[i] == 0)
2354 break;
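/*
 * Each record is a fixed 37-character field (hence the i*37 offset):
 * "{pid:count:length:r_count:r_length}" -- apparently write/read request
 * counts and byte lengths, judging by the pid_logger_* array names.
 */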
2355 sprintf (g_pid_logger[index].pid_buffer+i*37, "{%05d:%05d:%08d:%05d:%08d}", g_pid_logger[index].pid_logger[i], g_pid_logger[index].pid_logger_counter[i], g_pid_logger[index].pid_logger_length[i], g_pid_logger[index].pid_logger_r_counter[i], g_pid_logger[index].pid_logger_r_length[i]);
2356
2357 }
2358 if( i != 0) {
2359 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd pid:%d %s\n", g_pid_logger[index].current_pid, g_pid_logger[index].pid_buffer);
2360 //xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "sizeof(&(g_pid_logger[index].pid_logger)):%d\n", sizeof(unsigned short)*PID_LOGGER_COUNT);
2361 //memset( &(g_pid_logger[index].pid_logger), 0, sizeof(struct struct_pid_logger)-(unsigned long)&(((struct struct_pid_logger *)0)->pid_logger));
2362 memset( &(g_pid_logger[index].pid_logger), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
2363 memset( &(g_pid_logger[index].pid_logger_counter), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
2364 memset( &(g_pid_logger[index].pid_logger_length), 0, sizeof(unsigned int)*PID_LOGGER_COUNT);
2365 memset( &(g_pid_logger[index].pid_logger_r_counter), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
2366 memset( &(g_pid_logger[index].pid_logger_r_length), 0, sizeof(unsigned int)*PID_LOGGER_COUNT);
2367 memset( &(g_pid_logger[index].pid_buffer), 0, sizeof(char)*1024);
2368
2369
2370 }
2371 g_pid_logger[index].pid_buffer[0] = '\0';
2372
2373 } while(0);
2374 #endif
2375
2376 #if defined(FEATURE_STORAGE_VMSTAT_LOGGER)
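/* Page counters are shifted by (PAGE_SHIFT - 10) to report KiB. */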
2377 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "vmstat (FP:%ld)(FD:%ld)(ND:%ld)(WB:%ld)(NW:%ld)\n",
2378 ((global_page_state(NR_FILE_PAGES)) << (PAGE_SHIFT - 10)),
2379 ((global_page_state(NR_FILE_DIRTY)) << (PAGE_SHIFT - 10)),
2380 ((global_page_state(NR_DIRTIED)) << (PAGE_SHIFT - 10)),
2381 ((global_page_state(NR_WRITEBACK)) << (PAGE_SHIFT - 10)),
2382 ((global_page_state(NR_WRITTEN)) << (PAGE_SHIFT - 10)));
2383 #endif
2384
2385 }
2386 if( rqc )
2387 {
2388 rq_byte = blk_rq_bytes(rqc);
2389 rq_sector = blk_rq_sectors(rqc);
2390 if(rq_data_dir(rqc) == WRITE)
2391 {
2392 if(mmcqd_wr_offset_tag[idx]>0)
2393 {
2394 sect_offset = abs(blk_rq_pos(rqc) - mmcqd_wr_offset_tag[idx]);
2395 mmcqd_wr_offset[idx] += sect_offset;
2396 if(sect_offset == 1)
2397 mmcqd_wr_break[idx]++;
2398 }
2399 mmcqd_wr_offset_tag[idx] = blk_rq_pos(rqc) + rq_sector;
2400 if(rq_sector <= 1) //512 bytes
2401 mmcqd_wr_bit[idx] ++;
2402 else if(rq_sector >= 1016) //508kB
2403 mmcqd_wr_tract[idx] ++;
2404 }
2405 else //read
2406 {
2407 if(mmcqd_rd_offset_tag[idx]>0)
2408 {
2409 sect_offset = abs(blk_rq_pos(rqc) - mmcqd_rd_offset_tag[idx]);
2410 mmcqd_rd_offset[idx] += sect_offset;
2411 if(sect_offset == 1)
2412 mmcqd_rd_break[idx]++;
2413 }
2414 mmcqd_rd_offset_tag[idx] = blk_rq_pos(rqc) + rq_sector;
2415 if(rq_sector <= 1) //512 bytes
2416 mmcqd_rd_bit[idx] ++;
2417 else if(rq_sector >= 1016) //508kB
2418 mmcqd_rd_tract[idx] ++;
2419 }
2420 }
2421 #endif
2422 do {
2423 if (rqc) {
2424 /*
2425 * When 4KB native sectors are enabled, only reads and writes
2426 * that are a multiple of 8 blocks (4KB) are allowed
2427 */
2428 if ((brq->data.blocks & 0x07) &&
2429 (card->ext_csd.data_sector_size == 4096)) {
2430 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
2431 req->rq_disk->disk_name);
2432 mq_rq = mq->mqrq_cur;
2433 goto cmd_abort;
2434 }
2435
2436 if (reqs >= packed_nr)
2437 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
2438 card, mq);
2439 else
2440 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2441 areq = &mq->mqrq_cur->mmc_active;
2442 } else
2443 areq = NULL;
2444 areq = mmc_start_req(card->host, areq, (int *) &status);
2445 if (!areq) {
2446 if (status == MMC_BLK_NEW_REQUEST)
2447 mq->flags |= MMC_QUEUE_NEW_REQUEST;
2448 return 0;
2449 }
2450
2451 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
2452 brq = &mq_rq->brq;
2453 req = mq_rq->req;
2454 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
2455 mmc_queue_bounce_post(mq_rq);
2456
2457 switch (status) {
2458 case MMC_BLK_SUCCESS:
2459 case MMC_BLK_PARTIAL:
2460 /*
2461 * A block was successfully transferred.
2462 */
2463 mmc_blk_reset_success(md, type);
2464
2465 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2466 ret = mmc_blk_end_packed_req(mq_rq);
2467 break;
2468 } else {
2469 ret = blk_end_request(req, 0,
2470 brq->data.bytes_xfered);
2471 }
2472
2473 // if (card && card->host && card->host->areq)
2474 // met_mmc_end(card->host, card->host->areq);
2475
2476 /*
2477 * If the blk_end_request function returns non-zero even
2478 * though all data has been transferred and no errors
2479 * were returned by the host controller, it's a bug.
2480 */
2481 if (status == MMC_BLK_SUCCESS && ret) {
2482 pr_err("%s BUG rq_tot %d d_xfer %d\n",
2483 __func__, blk_rq_bytes(req),
2484 brq->data.bytes_xfered);
2485 rqc = NULL;
2486 goto cmd_abort;
2487 }
2488 break;
2489 case MMC_BLK_CMD_ERR:
2490 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
2491 if (!mmc_blk_reset(md, card->host, type))
2492 break;
2493 goto cmd_abort;
2494 case MMC_BLK_RETRY:
2495 if (retry++ < 5)
2496 break;
2497 /* Fall through */
2498 case MMC_BLK_ABORT:
2499 if (!mmc_blk_reset(md, card->host, type))
2500 break;
2501 goto cmd_abort;
2502 case MMC_BLK_DATA_ERR: {
2503 int err;
2504
2505 err = mmc_blk_reset(md, card->host, type);
2506 if (!err)
2507 break;
2508 if (err == -ENODEV ||
2509 mmc_packed_cmd(mq_rq->cmd_type))
2510 goto cmd_abort;
2511 /* Fall through */
2512 }
2513 case MMC_BLK_ECC_ERR:
2514 if (brq->data.blocks > 1) {
2515 /* Redo read one sector at a time */
2516 pr_warning("%s: retrying using single block read\n",
2517 req->rq_disk->disk_name);
2518 disable_multi = 1;
2519 break;
2520 }
2521 /*
2522 * After an error, we redo I/O one sector at a
2523 * time, so we only reach here after trying to
2524 * read a single sector.
2525 */
2526 ret = blk_end_request(req, -EIO,
2527 brq->data.blksz);
2528 if (!ret)
2529 goto start_new_req;
2530 break;
2531 case MMC_BLK_NOMEDIUM:
2532 goto cmd_abort;
2533 default:
2534 pr_err("%s: Unhandled return value (%d)",
2535 req->rq_disk->disk_name, status);
2536 goto cmd_abort;
2537 }
2538
2539 if (ret) {
2540 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2541 if (!mq_rq->packed->retries)
2542 goto cmd_abort;
2543 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
2544 mmc_start_req(card->host,
2545 &mq_rq->mmc_active, NULL);
2546 } else {
2547
2548 /*
2549 * In case of an incomplete request,
2550 * prepare it again and resend.
2551 */
2552 mmc_blk_rw_rq_prep(mq_rq, card,
2553 disable_multi, mq);
2554 mmc_start_req(card->host,
2555 &mq_rq->mmc_active, NULL);
2556 }
2557 }
2558 } while (ret);
2559
2560 return 1;
2561
2562 cmd_abort:
2563 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2564 mmc_blk_abort_packed_req(mq_rq);
2565 } else {
2566 if (mmc_card_removed(card))
2567 req->cmd_flags |= REQ_QUIET;
2568 while (ret)
2569 ret = blk_end_request(req, -EIO,
2570 blk_rq_cur_bytes(req));
2571 }
2572
2573 start_new_req:
2574 if (rqc) {
2575 if (mmc_card_removed(card)) {
2576 rqc->cmd_flags |= REQ_QUIET;
2577 blk_end_request_all(rqc, -EIO);
2578 } else {
2579 /*
2580 * If the current request is packed, it needs to be put back.
2581 */
2582 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
2583 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
2584
2585 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2586 mmc_start_req(card->host,
2587 &mq->mqrq_cur->mmc_active, NULL);
2588 }
2589 }
2590
2591 return 0;
2592 }
2593
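/*
 * Top-level issue function called from the queue thread: dispatches
 * discard/secure discard, flush and read/write requests, claims the host
 * for the first request and releases it once the queue drains or a
 * special request completes.
 */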
2594 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2595 {
2596 int ret;
2597 struct mmc_blk_data *md = mq->data;
2598 struct mmc_card *card = md->queue.card;
2599 struct mmc_host *host = card->host;
2600 unsigned long flags;
2601 unsigned int cmd_flags = req ? req->cmd_flags : 0;
2602
2603 #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
2604 if (mmc_bus_needs_resume(card->host))
2605 mmc_resume_bus(card->host);
2606 #endif
2607
2608 if (req && !mq->mqrq_prev->req)
2609 /* claim host only for the first request */
2610 mmc_claim_host(card->host);
2611
2612 ret = mmc_blk_part_switch(card, md);
2613 if (ret) {
2614 if (req) {
2615 blk_end_request_all(req, -EIO);
2616 }
2617 ret = 0;
2618 goto out;
2619 }
2620
2621 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
2622 if (cmd_flags & REQ_DISCARD) {
2623 /* complete ongoing async transfer before issuing discard */
2624 if (card->host->areq)
2625 mmc_blk_issue_rw_rq(mq, NULL);
2626 if (req->cmd_flags & REQ_SECURE &&
2627 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2628 ret = mmc_blk_issue_secdiscard_rq(mq, req);
2629 else
2630 ret = mmc_blk_issue_discard_rq(mq, req);
2631 } else if (cmd_flags & REQ_FLUSH) {
2632 /* complete ongoing async transfer before issuing flush */
2633 if (card->host->areq)
2634 mmc_blk_issue_rw_rq(mq, NULL);
2635 ret = mmc_blk_issue_flush(mq, req);
2636 } else {
2637 if (!req && host->areq) {
2638 spin_lock_irqsave(&host->context_info.lock, flags);
2639 host->context_info.is_waiting_last_req = true;
2640 spin_unlock_irqrestore(&host->context_info.lock, flags);
2641 }
2642 ret = mmc_blk_issue_rw_rq(mq, req);
2643 }
2644
2645 out:
2646 if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
2647 (cmd_flags & MMC_REQ_SPECIAL_MASK))
2648 /*
2649 * Release host when there are no more requests
2650 * and after a special request (discard, flush) is done.
2651 * In case of a special request, there is no reentry to
2652 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
2653 */
2654 mmc_release_host(card->host);
2655 return ret;
2656 }
2657
2658 static inline int mmc_blk_readonly(struct mmc_card *card)
2659 {
2660 return mmc_card_readonly(card) ||
2661 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2662 }
2663
2664 //#if defined(FEATURE_STORAGE_PID_LOGGER)
2665 //extern unsigned long get_memory_size(void);
2666 //#endif
2667 #ifdef CONFIG_MTK_EXTMEM
2668 extern void* extmem_malloc_page_align(size_t bytes);
2669 #endif
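/*
 * Allocate one mmc_blk_data (gendisk + request queue) for a card area.
 * A NULL subname means this is the main device for the card; partitions
 * inherit the name index of their parent disk.
 */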
2670 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2671 struct device *parent,
2672 sector_t size,
2673 bool default_ro,
2674 const char *subname,
2675 int area_type)
2676 {
2677 struct mmc_blk_data *md;
2678 int devidx, ret;
2679
2680 devidx = find_first_zero_bit(dev_use, max_devices);
2681 if (devidx >= max_devices)
2682 return ERR_PTR(-ENOSPC);
2683 __set_bit(devidx, dev_use);
2684
2685 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
2686 if (!md) {
2687 ret = -ENOMEM;
2688 goto out;
2689 }
2690
2691 /*
2692 * !subname implies we are creating main mmc_blk_data that will be
2693 * associated with mmc_card with mmc_set_drvdata. Due to device
2694 * partitions, devidx will not coincide with a per-physical card
2695 * index anymore so we keep track of a name index.
2696 */
2697 if (!subname) {
2698 md->name_idx = find_first_zero_bit(name_use, max_devices);
2699 __set_bit(md->name_idx, name_use);
2700 } else
2701 md->name_idx = ((struct mmc_blk_data *)
2702 dev_to_disk(parent)->private_data)->name_idx;
2703
2704 md->area_type = area_type;
2705
2706 /*
2707 * Set the read-only status based on the supported commands
2708 * and the write protect switch.
2709 */
2710 md->read_only = mmc_blk_readonly(card);
2711
2712 md->disk = alloc_disk(perdev_minors);
2713 if (md->disk == NULL) {
2714 ret = -ENOMEM;
2715 goto err_kfree;
2716 }
2717
2718 spin_lock_init(&md->lock);
2719 INIT_LIST_HEAD(&md->part);
2720 md->usage = 1;
2721
2722 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
2723 if (ret)
2724 goto err_putdisk;
2725 #if defined(FEATURE_STORAGE_PID_LOGGER)
2726 if( !page_logger){
2727 //num_page_logger = sizeof(struct page_pid_logger);
2728 //page_logger = vmalloc(num_physpages*sizeof(struct page_pid_logger));
2729 // sized via get_max_DRAM_size(): one entry per page frame from start pfn to max pfn
2730
2731 //unsigned long count = get_memory_size() >> PAGE_SHIFT;
2732 unsigned long count = get_max_DRAM_size() >> PAGE_SHIFT;
2733 #ifdef CONFIG_MTK_EXTMEM
2734 page_logger = extmem_malloc_page_align(count * sizeof(struct page_pid_logger));
2735 #else
2736 page_logger = vmalloc(count * sizeof(struct page_pid_logger));
2737 #endif
2738 if( page_logger) {
2739 memset( page_logger, -1, count*sizeof( struct page_pid_logger));
2740 }
2741 spin_lock_init(&g_locker);
2742 }
2743 #endif
2744 #if defined(FEATURE_STORAGE_META_LOG)
2745 check_perdev_minors = perdev_minors;
2746 #endif
2747
2748 md->queue.issue_fn = mmc_blk_issue_rq;
2749 md->queue.data = md;
2750
2751 md->disk->major = MMC_BLOCK_MAJOR;
2752 md->disk->first_minor = devidx * perdev_minors;
2753 md->disk->fops = &mmc_bdops;
2754 md->disk->private_data = md;
2755 md->disk->queue = md->queue.queue;
2756 md->disk->driverfs_dev = parent;
2757 set_disk_ro(md->disk, md->read_only || default_ro);
2758 md->disk->flags = GENHD_FL_EXT_DEVT;
2759 if (area_type & MMC_BLK_DATA_AREA_RPMB)
2760 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
2761
2762 /*
2763 * As discussed on lkml, GENHD_FL_REMOVABLE should:
2764 *
2765 * - be set for removable media with permanent block devices
2766 * - be unset for removable block devices with permanent media
2767 *
2768 * Since MMC block devices clearly fall under the second
2769 * case, we do not set GENHD_FL_REMOVABLE. Userspace
2770 * should use the block device creation/destruction hotplug
2771 * messages to tell when the card is present.
2772 */
2773
2774 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2775 "mmcblk%d%s", md->name_idx, subname ? subname : "");
2776
2777 if (mmc_card_mmc(card))
2778 blk_queue_logical_block_size(md->queue.queue,
2779 card->ext_csd.data_sector_size);
2780 else
2781 blk_queue_logical_block_size(md->queue.queue, 512);
2782
2783 set_capacity(md->disk, size);
2784
2785 if (mmc_host_cmd23(card->host)) {
2786 if (mmc_card_mmc(card) ||
2787 (mmc_card_sd(card) &&
2788 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2789 md->flags |= MMC_BLK_CMD23;
2790 }
2791
2792 if (mmc_card_mmc(card) &&
2793 md->flags & MMC_BLK_CMD23 &&
2794 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2795 card->ext_csd.rel_sectors)) {
2796 md->flags |= MMC_BLK_REL_WR;
2797 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
2798 }
2799
2800 if (mmc_card_mmc(card) &&
2801 (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2802 (md->flags & MMC_BLK_CMD23) &&
2803 card->ext_csd.packed_event_en) {
2804 if (!mmc_packed_init(&md->queue, card))
2805 md->flags |= MMC_BLK_PACKED_CMD;
2806 }
2807
2808 return md;
2809
2810 err_putdisk:
2811 put_disk(md->disk);
2812 err_kfree:
2813 kfree(md);
2814 out:
2815 return ERR_PTR(ret);
2816 }
2817
2818 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2819 {
2820 sector_t size;
2821 #ifdef CONFIG_MTK_EMMC_SUPPORT
2822 unsigned int l_reserve;
2823 struct storage_info s_info = {0};
2824 #endif
2825 struct mmc_blk_data *md;
2826
2827 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2828 /*
2829 * The EXT_CSD sector count is in number of 512 byte
2830 * sectors.
2831 */
2832 size = card->ext_csd.sectors;
2833 } else {
2834 /*
2835 * The CSD capacity field is in units of read_blkbits.
2836 * set_capacity takes units of 512 bytes.
2837 */
2838 size = card->csd.capacity << (card->csd.read_blkbits - 9);
2839 }
2840
2841 if(!mmc_card_sd(card)){
2842 #ifdef CONFIG_MTK_EMMC_SUPPORT
2843 msdc_get_info(EMMC_CARD_BOOT, EMMC_RESERVE, &s_info);
2844 l_reserve = s_info.emmc_reserve;
2845 printk("l_reserve = 0x%x\n", l_reserve);
2846 size -= l_reserve; /*reserved for 64MB (emmc otp + emmc combo offset + reserved)*/
2847 #endif
2848 }
2849 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2850 MMC_BLK_DATA_AREA_MAIN);
2851 return md;
2852 }
2853
2854 static int mmc_blk_alloc_part(struct mmc_card *card,
2855 struct mmc_blk_data *md,
2856 unsigned int part_type,
2857 sector_t size,
2858 bool default_ro,
2859 const char *subname,
2860 int area_type)
2861 {
2862 char cap_str[10];
2863 struct mmc_blk_data *part_md;
2864
2865 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
2866 subname, area_type);
2867 if (IS_ERR(part_md))
2868 return PTR_ERR(part_md);
2869 part_md->part_type = part_type;
2870 list_add(&part_md->part, &md->part);
2871
2872 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
2873 cap_str, sizeof(cap_str));
2874 pr_info("%s: %s %s partition %u %s\n",
2875 part_md->disk->disk_name, mmc_card_id(card),
2876 mmc_card_name(card), part_md->part_type, cap_str);
2877 return 0;
2878 }
2879
2880 /* MMC Physical partitions consist of two boot partitions and
2881 * up to four general purpose partitions.
2882 * For each partition enabled in EXT_CSD a block device will be allocated
2883 * to provide access to the partition.
2884 */
2885
2886 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2887 {
2888 int idx, ret = 0;
2889
2890 if (!mmc_card_mmc(card))
2891 return 0;
2892
2893 for (idx = 0; idx < card->nr_parts; idx++) {
2894 if (card->part[idx].size) {
2895 ret = mmc_blk_alloc_part(card, md,
2896 card->part[idx].part_cfg,
2897 card->part[idx].size >> 9,
2898 card->part[idx].force_ro,
2899 card->part[idx].name,
2900 card->part[idx].area_type);
2901 if (ret)
2902 return ret;
2903 }
2904 }
2905
2906 return ret;
2907 }
2908
2909 static void mmc_blk_remove_req(struct mmc_blk_data *md)
2910 {
2911 struct mmc_card *card;
2912
2913 if (md) {
2914 card = md->queue.card;
2915 if (md->disk->flags & GENHD_FL_UP) {
2916 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2917 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2918 card->ext_csd.boot_ro_lockable)
2919 device_remove_file(disk_to_dev(md->disk),
2920 &md->power_ro_lock);
2921
2922 /* Stop new requests from getting into the queue */
2923 del_gendisk(md->disk);
2924 }
2925
2926 /* Then flush out any already in there */
2927 mmc_cleanup_queue(&md->queue);
2928 if (md->flags & MMC_BLK_PACKED_CMD)
2929 mmc_packed_clean(&md->queue);
2930 mmc_blk_put(md);
2931 }
2932 }
2933
2934 static void mmc_blk_remove_parts(struct mmc_card *card,
2935 struct mmc_blk_data *md)
2936 {
2937 struct list_head *pos, *q;
2938 struct mmc_blk_data *part_md;
2939
2940 __clear_bit(md->name_idx, name_use);
2941 list_for_each_safe(pos, q, &md->part) {
2942 part_md = list_entry(pos, struct mmc_blk_data, part);
2943 list_del(pos);
2944 mmc_blk_remove_req(part_md);
2945 }
2946 }
2947
2948 static int mmc_add_disk(struct mmc_blk_data *md)
2949 {
2950 int ret;
2951 struct mmc_card *card = md->queue.card;
2952
2953 add_disk(md->disk);
2954 md->force_ro.show = force_ro_show;
2955 md->force_ro.store = force_ro_store;
2956 sysfs_attr_init(&md->force_ro.attr);
2957 md->force_ro.attr.name = "force_ro";
2958 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2959 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2960 if (ret)
2961 goto force_ro_fail;
2962
2963 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2964 card->ext_csd.boot_ro_lockable) {
2965 umode_t mode;
2966
2967 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2968 mode = S_IRUGO;
2969 else
2970 mode = S_IRUGO | S_IWUSR;
2971
2972 md->power_ro_lock.show = power_ro_lock_show;
2973 md->power_ro_lock.store = power_ro_lock_store;
2974 sysfs_attr_init(&md->power_ro_lock.attr);
2975 md->power_ro_lock.attr.mode = mode;
2976 md->power_ro_lock.attr.name =
2977 "ro_lock_until_next_power_on";
2978 ret = device_create_file(disk_to_dev(md->disk),
2979 &md->power_ro_lock);
2980 if (ret)
2981 goto power_ro_lock_fail;
2982 }
2983 return ret;
2984
2985 power_ro_lock_fail:
2986 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2987 force_ro_fail:
2988 del_gendisk(md->disk);
2989
2990 return ret;
2991 }
2992
2993 #define CID_MANFID_SANDISK 0x2
2994 #define CID_MANFID_TOSHIBA 0x11
2995 #define CID_MANFID_MICRON 0x13
2996 #define CID_MANFID_SAMSUNG 0x15
2997 #define CID_MANFID_SANDISK_NEW 0x45
2998 #define CID_MANFID_HYNIX 0x90
2999 #define CID_MANFID_KSI 0x70
3000
3001 static const struct mmc_fixup blk_fixups[] =
3002 {
3003 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
3004 MMC_QUIRK_INAND_CMD38),
3005 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
3006 MMC_QUIRK_INAND_CMD38),
3007 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
3008 MMC_QUIRK_INAND_CMD38),
3009 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
3010 MMC_QUIRK_INAND_CMD38),
3011 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
3012 MMC_QUIRK_INAND_CMD38),
3013 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_NEW, CID_OEMID_ANY, add_quirk,
3014 MMC_QUIRK_PON),
3015 /*
3016 * Some MMC cards experience performance degradation with CMD23
3017 * instead of CMD12-bounded multiblock transfers. For now we'll
3018 * black list what's bad...
3019 * - Certain Toshiba cards.
3020 *
3021 * N.B. This doesn't affect SD cards.
3022 */
3023 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3024 MMC_QUIRK_BLK_NO_CMD23),
3025 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3026 MMC_QUIRK_BLK_NO_CMD23),
3027 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3028 MMC_QUIRK_BLK_NO_CMD23),
3029
3030 /*
3031 * Some Micron MMC cards need a longer data read timeout than
3032 * indicated in CSD.
3033 */
3034 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
3035 MMC_QUIRK_LONG_READ_TIME),
3036
3037 /*
3038 * On these Samsung MoviNAND parts, performing secure erase or
3039 * secure trim can result in unrecoverable corruption due to a
3040 * firmware bug.
3041 */
3042 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3043 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3044 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3045 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3046 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3047 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3048 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3049 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3050 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3051 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3052 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3053 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3054 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3055 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3056 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3057 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3058 #ifdef CONFIG_MTK_EMMC_CACHE
3059 /*
3060 * With the cache feature enabled, some MMC cards cannot flush previously cached data by force
3061 * programming or reliable write, which cannot guarantee strong ordering between metadata and file data.
3062 */
3063
3064 /*
3065 * On Toshiba eMMC, enabling the cache feature causes a write performance drop because the flush operation takes too long
3066 */
3067 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3068 MMC_QUIRK_DISABLE_CACHE),
3069 #endif
3070
3071 /* Trim on Hynix eMMC 4.41 devices can lead to boot failure. */
3072 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY, add_quirk_mmc,
3073 MMC_QUIRK_TRIM_UNSTABLE),
3074
3075 /* Trim on KSI devices with PRV=0x3 leads to a write performance drop. */
3076 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_KSI, CID_OEMID_ANY, add_quirk_mmc_ksi_v03_skip_trim,
3077 MMC_QUIRK_KSI_V03_SKIP_TRIM),
3078
3079 END_FIXUP
3080 };
3081
3082 #if defined(CONFIG_MTK_EMMC_SUPPORT) && !defined(CONFIG_MTK_GPT_SCHEME_SUPPORT)
3083 extern void emmc_create_sys_symlink (struct mmc_card *card);
3084 #endif
3085 static int mmc_blk_probe(struct mmc_card *card)
3086 {
3087 struct mmc_blk_data *md, *part_md;
3088 char cap_str[10];
3089
3090 /*
3091 * Check that the card supports the command class(es) we need.
3092 */
3093 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
3094 return -ENODEV;
3095
3096 md = mmc_blk_alloc(card);
3097 if (IS_ERR(md))
3098 return PTR_ERR(md);
3099
3100 string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
3101 cap_str, sizeof(cap_str));
3102 pr_info("%s: %s %s %s %s\n",
3103 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
3104 cap_str, md->read_only ? "(ro)" : "");
3105
3106 if (mmc_blk_alloc_parts(card, md))
3107 goto out;
3108
3109 mmc_set_drvdata(card, md);
3110 mmc_fixup_device(card, blk_fixups);
3111
3112 printk("[%s]: %s by manufacturer settings, quirks=0x%x\n", __func__, md->disk->disk_name, card->quirks);
3113
3114 #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
3115 mmc_set_bus_resume_policy(card->host, 1);
3116 #endif
3117 if (mmc_add_disk(md))
3118 goto out;
3119
3120 list_for_each_entry(part_md, &md->part, part) {
3121 if (mmc_add_disk(part_md))
3122 goto out;
3123 }
3124 #if defined(CONFIG_MTK_EMMC_SUPPORT) && !defined(CONFIG_MTK_GPT_SCHEME_SUPPORT)
3125 emmc_create_sys_symlink(card);
3126 #endif
3127 return 0;
3128
3129 out:
3130 mmc_blk_remove_parts(card, md);
3131 mmc_blk_remove_req(md);
3132 return 0;
3133 }
3134
3135 static void mmc_blk_remove(struct mmc_card *card)
3136 {
3137 struct mmc_blk_data *md = mmc_get_drvdata(card);
3138
3139 mmc_blk_remove_parts(card, md);
3140 mmc_claim_host(card->host);
3141 mmc_blk_part_switch(card, md);
3142 mmc_release_host(card->host);
3143 mmc_blk_remove_req(md);
3144 mmc_set_drvdata(card, NULL);
3145 #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
3146 mmc_set_bus_resume_policy(card->host, 0);
3147 #endif
3148 }
3149
3150 #ifdef CONFIG_PM
3151 static int mmc_blk_suspend(struct mmc_card *card)
3152 {
3153 struct mmc_blk_data *part_md;
3154 struct mmc_blk_data *md = mmc_get_drvdata(card);
3155
3156 if (md) {
3157 mmc_queue_suspend(&md->queue);
3158 list_for_each_entry(part_md, &md->part, part) {
3159 mmc_queue_suspend(&part_md->queue);
3160 }
3161 }
3162 return 0;
3163 }
3164
3165 static int mmc_blk_resume(struct mmc_card *card)
3166 {
3167 struct mmc_blk_data *part_md;
3168 struct mmc_blk_data *md = mmc_get_drvdata(card);
3169
3170 if (md) {
3171 /*
3172 * Resume involves the card going into idle state,
3173 * so current partition is always the main one.
3174 */
3175 md->part_curr = md->part_type;
3176 mmc_queue_resume(&md->queue);
3177 list_for_each_entry(part_md, &md->part, part) {
3178 mmc_queue_resume(&part_md->queue);
3179 }
3180 }
3181 return 0;
3182 }
3183 #else
3184 #define mmc_blk_suspend NULL
3185 #define mmc_blk_resume NULL
3186 #endif
3187
3188 static struct mmc_driver mmc_driver = {
3189 .drv = {
3190 .name = "mmcblk",
3191 },
3192 .probe = mmc_blk_probe,
3193 .remove = mmc_blk_remove,
3194 .suspend = mmc_blk_suspend,
3195 .resume = mmc_blk_resume,
3196 };
3197
3198 static int __init mmc_blk_init(void)
3199 {
3200 int res;
3201
3202 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
3203 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
3204
3205 max_devices = 256 / perdev_minors;
3206
3207 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
3208 if (res)
3209 goto out;
3210
3211 res = mmc_register_driver(&mmc_driver);
3212 if (res)
3213 goto out2;
3214
3215 return 0;
3216 out2:
3217 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
3218 out:
3219 return res;
3220 }
3221
3222 static void __exit mmc_blk_exit(void)
3223 {
3224 mmc_unregister_driver(&mmc_driver);
3225 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
3226 }
3227
3228 module_init(mmc_blk_init);
3229 module_exit(mmc_blk_exit);
3230
3231 MODULE_LICENSE("GPL");
3232 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
3233