1 /*
2 * Block driver for media (i.e., flash cards)
3 *
4 * Copyright 2002 Hewlett-Packard Company
5 * Copyright 2005-2008 Pierre Ossman
6 *
7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is
9 * preserved in its entirety in all copies and derived works.
10 *
11 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13 * FITNESS FOR ANY PARTICULAR PURPOSE.
14 *
15 * Many thanks to Alessandro Rubini and Jonathan Corbet!
16 *
17 * Author: Andrew Christian
18 * 28 May 2002
19 */
20 #include <linux/moduleparam.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23
24 #include <linux/kernel.h>
25 #include <linux/fs.h>
26 #include <linux/slab.h>
27 #include <linux/errno.h>
28 #include <linux/hdreg.h>
29 #include <linux/kdev_t.h>
30 #include <linux/blkdev.h>
31 #include <linux/mutex.h>
32 #include <linux/scatterlist.h>
33 #include <linux/string_helpers.h>
34 #include <linux/delay.h>
35 #include <linux/capability.h>
36 #include <linux/compat.h>
37
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/mmc.h>
40
41 #include <linux/mmc/ioctl.h>
42 #include <linux/mmc/card.h>
43 #include <linux/mmc/host.h>
44 #include <linux/mmc/mmc.h>
45 #include <linux/mmc/sd.h>
46
47 #include <asm/uaccess.h>
48
49 #include "queue.h"
50 #include <mach/mtk_meminfo.h>
51
52 /* add vmstat info to the block tag log */
53 #include <linux/vmstat.h>
54 #define FEATURE_STORAGE_VMSTAT_LOGGER
55
56
57 #include <linux/xlog.h>
58 #include <asm/div64.h>
59 #include <linux/vmalloc.h>
60
61 #include <linux/mmc/sd_misc.h>
62
63 #define MET_USER_EVENT_SUPPORT
64 #include <linux/met_drv.h>
65
66 #define FEATURE_STORAGE_PERF_INDEX
67 /* keep storage perf logging enabled even in user builds (the opt-out below is compiled out) */
68 #if 0
69 #ifdef USER_BUILD_KERNEL
70 #undef FEATURE_STORAGE_PERF_INDEX
71 #endif
72 #endif
73
74 MODULE_ALIAS("mmc:block");
75 #ifdef MODULE_PARAM_PREFIX
76 #undef MODULE_PARAM_PREFIX
77 #endif
78 #define MODULE_PARAM_PREFIX "mmcblk."
79
80 #define INAND_CMD38_ARG_EXT_CSD 113
81 #define INAND_CMD38_ARG_ERASE 0x00
82 #define INAND_CMD38_ARG_TRIM 0x01
83 #define INAND_CMD38_ARG_SECERASE 0x80
84 #define INAND_CMD38_ARG_SECTRIM1 0x81
85 #define INAND_CMD38_ARG_SECTRIM2 0x88
86 #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
87
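/*
 * A request is treated as a "reliable write" candidate when it is a
 * write carrying REQ_FUA or REQ_META; mmc_blk_rw_rq_prep() only acts on
 * this if the card also advertises reliable-write support (MMC_BLK_REL_WR).
 */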
88 #define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
89 (req->cmd_flags & REQ_META)) && \
90 (rq_data_dir(req) == WRITE))
91 #define PACKED_CMD_VER 0x01
92 #define PACKED_CMD_WR 0x02
93
94 static DEFINE_MUTEX(block_mutex);
95
96 /*
97 * The defaults come from config options but can be overridden by module
98 * or bootarg options.
99 */
100 static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
101
102 /*
103 * We've only got one major, so number of mmcblk devices is
104 * limited to 256 / number of minors per device.
105 */
106 static int max_devices;
107
108 /* 256 minors, so at most 256 separate devices */
109 static DECLARE_BITMAP(dev_use, 256);
110 static DECLARE_BITMAP(name_use, 256);
111
112 /*
113 * There is one mmc_blk_data per slot.
114 */
115 struct mmc_blk_data {
116 spinlock_t lock;
117 struct gendisk *disk;
118 struct mmc_queue queue;
119 struct list_head part;
120
121 unsigned int flags;
122 #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
123 #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
124 #define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
125
126 unsigned int usage;
127 unsigned int read_only;
128 unsigned int part_type;
129 unsigned int name_idx;
130 unsigned int reset_done;
131 #define MMC_BLK_READ BIT(0)
132 #define MMC_BLK_WRITE BIT(1)
133 #define MMC_BLK_DISCARD BIT(2)
134 #define MMC_BLK_SECDISCARD BIT(3)
135
136 /*
137 * Only set in main mmc_blk_data associated
138 * with mmc_card with mmc_set_drvdata, and keeps
139 * track of the current selected device partition.
140 */
141 unsigned int part_curr;
142 struct device_attribute force_ro;
143 struct device_attribute power_ro_lock;
144 int area_type;
145 };
146
147 static DEFINE_MUTEX(open_lock);
148
149 enum {
150 MMC_PACKED_NR_IDX = -1,
151 MMC_PACKED_NR_ZERO,
152 MMC_PACKED_NR_SINGLE,
153 };
154
155 module_param(perdev_minors, int, 0444);
156 MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
157
158 static inline int mmc_blk_part_switch(struct mmc_card *card,
159 struct mmc_blk_data *md);
160 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
161
162 #ifndef CONFIG_MTK_FPGA
163 #include <linux/met_ftrace_bio.h>
164 #endif
165
166 char mmc_get_rw_type(u32 opcode)
167 {
168 switch (opcode)
169 {
170 case MMC_READ_SINGLE_BLOCK:
171 case MMC_READ_MULTIPLE_BLOCK:
172 return 'R';
173 case MMC_WRITE_BLOCK:
174 case MMC_WRITE_MULTIPLE_BLOCK:
175 return 'W';
176 default:
177 /* unknown opcode */
178 return 'X';
179 }
180 }
181
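/*
 * The helpers below guard the MET (MediaTek Embedded Trace) hooks: they
 * simply verify that every pointer the trace points dereference (host,
 * card, request, command, data, gendisk) is non-NULL before an event is
 * emitted.
 */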
182 inline int check_met_mmc_async_req_legal(struct mmc_host *host, struct mmc_async_req *areq)
183 {
184 int is_legal = 0;
185
186 if (!((host == NULL) || (areq == NULL) || (areq->mrq == NULL)
187 || (areq->mrq->cmd == NULL) || (areq->mrq->data == NULL)
188 || (host->card == NULL))) {
189 is_legal = 1;
190 }
191
192 return is_legal;
193 }
194
195 inline int check_met_mmc_blk_data_legal(struct mmc_blk_data *md)
196 {
197 int is_legal = 0;
198
199 if (!((md == NULL) || (md->disk == NULL))) {
200 is_legal = 1;
201 }
202
203 return is_legal;
204 }
205
206 inline int check_met_mmc_req_legal(struct mmc_host *host, struct mmc_request *req)
207 {
208 int is_legal = 0;
209
210 if (!((host == NULL) || (req == NULL) || (req->cmd == NULL)
211 || (req->data == NULL) || (host->card == NULL))) {
212 is_legal = 1;
213 }
214
215 return is_legal;
216 }
217
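/*
 * The met_mmc_* hooks emit blktrace-style lines,
 * "major,minor R|W cmd->arg + nr_blocks [task]", either through
 * MET_FTRACE_PRINTK events or plain trace_printk().
 */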
218 void met_mmc_insert(struct mmc_host *host, struct mmc_async_req *areq)
219 {
220 struct mmc_blk_data *md;
221 char type;
222
223 if (!check_met_mmc_async_req_legal(host, areq))
224 return;
225
226 md = mmc_get_drvdata(host->card);
227 if (!check_met_mmc_blk_data_legal(md))
228 return;
229
230 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
231 if (type == 'X')
232 return;
233
234 #ifndef CONFIG_MTK_FPGA
235 MET_FTRACE_PRINTK(met_mmc_insert, md, areq, type);
236 #endif
237 }
238
239 void met_mmc_dma_map(struct mmc_host *host, struct mmc_async_req *areq)
240 {
241 struct mmc_blk_data *md;
242 char type;
243
244 if (!check_met_mmc_async_req_legal(host, areq))
245 return;
246
247 md = mmc_get_drvdata(host->card);
248 if (!check_met_mmc_blk_data_legal(md))
249 return;
250
251 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
252 if (type == 'X')
253 return;
254 #ifndef CONFIG_MTK_FPGA
255 MET_FTRACE_PRINTK(met_mmc_dma_map, md, areq, type);
256 #endif
257 }
258
259 //void met_mmc_issue(struct mmc_host *host, struct mmc_async_req *areq)
260 //{
261 // struct mmc_blk_data *md;
262 // char type;
263 //
264 // if (!check_met_mmc_async_req_legal(host, areq))
265 // return;
266 //
267 // md = mmc_get_drvdata(host->card);
268 //
269 // type = mmc_get_rw_type(areq->mrq->cmd->opcode);
270 // if (type == 'X')
271 // return;
272 //
273 // MET_FTRACE_PRINTK(met_mmc_issue, md, areq, type);
274 //}
275
276 void met_mmc_issue(struct mmc_host *host, struct mmc_request *req)
277 {
278 struct mmc_blk_data *md;
279 char type;
280
281 if (!check_met_mmc_req_legal(host, req))
282 return;
283
284 md = mmc_get_drvdata(host->card);
285 if (!check_met_mmc_blk_data_legal(md))
286 return;
287
288 type = mmc_get_rw_type(req->cmd->opcode);
289 if (type == 'X')
290 return;
291 #ifndef CONFIG_MTK_FPGA
292 MET_FTRACE_PRINTK(met_mmc_issue, md, req, type);
293 #endif
294 }
295
296 void met_mmc_send_cmd(struct mmc_host *host, struct mmc_command *cmd)
297 {
298 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
299 char type;
300
301 type = mmc_get_rw_type(cmd->opcode);
302 if (type == 'X')
303 return;
304
305 trace_printk("%d,%d %c %d + %d [%s]\n",
306 md->disk->major, md->disk->first_minor, type,
307 cmd->arg, cmd->data->blocks,
308 current->comm);
309 }
310
311 void met_mmc_xfr_done(struct mmc_host *host, struct mmc_command *cmd)
312 {
313 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
314 char type;
315
316 type = mmc_get_rw_type(cmd->opcode);
317 if (type == 'X')
318 return;
319
320 trace_printk("%d,%d %c %d + %d [%s]\n",
321 md->disk->major, md->disk->first_minor, type,
322 cmd->arg, cmd->data->blocks,
323 current->comm);
324 }
325
326 void met_mmc_wait_xfr(struct mmc_host *host, struct mmc_async_req *areq)
327 {
328 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
329 char type;
330
331 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
332 if (type == 'X')
333 return;
334
335 trace_printk("%d,%d %c %d + %d [%s]\n",
336 md->disk->major, md->disk->first_minor, type,
337 areq->mrq->cmd->arg, areq->mrq->data->blocks,
338 current->comm);
339
340 }
341
342 void met_mmc_tuning_start(struct mmc_host *host, struct mmc_command *cmd)
343 {
344 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
345 char type;
346
347 type = mmc_get_rw_type(cmd->opcode);
348 if (type == 'X')
349 return;
350
351 trace_printk("%d,%d %c %d + %d [%s]\n",
352 md->disk->major, md->disk->first_minor, type,
353 cmd->arg, cmd->data->blocks,
354 current->comm);
355 }
356
357 void met_mmc_tuning_end(struct mmc_host *host, struct mmc_command *cmd)
358 {
359 struct mmc_blk_data *md = mmc_get_drvdata(host->card);
360 char type;
361
362 type = mmc_get_rw_type(cmd->opcode);
363 if (type == 'X')
364 return;
365
366 trace_printk("%d,%d %c %d + %d [%s]\n",
367 md->disk->major, md->disk->first_minor, type,
368 cmd->arg, cmd->data->blocks,
369 current->comm);
370 }
371
372 void met_mmc_complete(struct mmc_host *host, struct mmc_async_req *areq)
373 {
374 struct mmc_blk_data *md;
375 char type;
376
377 if (!check_met_mmc_async_req_legal(host, areq))
378 return;
379
380 md = mmc_get_drvdata(host->card);
381 if (!check_met_mmc_blk_data_legal(md))
382 return;
383
384 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
385 if (type == 'X')
386 return;
387 #ifndef CONFIG_MTK_FPGA
388 MET_FTRACE_PRINTK(met_mmc_complete, md, areq, type);
389 #endif
390 }
391
392 void met_mmc_dma_unmap_start(struct mmc_host *host, struct mmc_async_req *areq)
393 {
394 struct mmc_blk_data *md;
395 char type;
396
397 if (!check_met_mmc_async_req_legal(host, areq))
398 return;
399
400 md = mmc_get_drvdata(host->card);
401 if (!check_met_mmc_blk_data_legal(md))
402 return;
403
404 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
405 if (type == 'X')
406 return;
407 #ifndef CONFIG_MTK_FPGA
408 MET_FTRACE_PRINTK(met_mmc_dma_unmap_start, md, areq, type);
409 #endif
410 }
411
412 void met_mmc_dma_unmap_stop(struct mmc_host *host, struct mmc_async_req *areq)
413 {
414 struct mmc_blk_data *md;
415 char type;
416
417 if (!check_met_mmc_async_req_legal(host, areq))
418 return;
419
420 md = mmc_get_drvdata(host->card);
421 if (!check_met_mmc_blk_data_legal(md))
422 return;
423
424 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
425 if (type == 'X')
426 return;
427 #ifndef CONFIG_MTK_FPGA
428 MET_FTRACE_PRINTK(met_mmc_dma_unmap_stop, md, areq, type);
429 #endif
430 }
431
432 void met_mmc_continue_req_end(struct mmc_host *host, struct mmc_async_req *areq)
433 {
434 struct mmc_blk_data *md;
435 char type;
436
437 if (!check_met_mmc_async_req_legal(host, areq))
438 return;
439
440 md = mmc_get_drvdata(host->card);
441 if (!check_met_mmc_blk_data_legal(md))
442 return;
443
444 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
445 if (type == 'X')
446 return;
447 #ifndef CONFIG_MTK_FPGA
448 MET_FTRACE_PRINTK(met_mmc_continue_req_end, md, areq, type);
449 #endif
450 }
451
452 void met_mmc_dma_stop(struct mmc_host *host, struct mmc_async_req *areq, unsigned int bd_num)
453 {
454 struct mmc_blk_data *md;
455 char type;
456
457 if (!check_met_mmc_async_req_legal(host, areq))
458 return;
459
460 md = mmc_get_drvdata(host->card);
461 if (!check_met_mmc_blk_data_legal(md))
462 return;
463
464 type = mmc_get_rw_type(areq->mrq->cmd->opcode);
465 if (type == 'X')
466 return;
467 #ifndef CONFIG_MTK_FPGA
468 MET_FTRACE_PRINTK(met_mmc_dma_stop, md, areq, type, bd_num);
469 #endif
470 }
471
472 //void met_mmc_end(struct mmc_host *host, struct mmc_async_req *areq)
473 //{
474 // struct mmc_blk_data *md;
475 // char type;
476 //
477 // if (areq && areq->mrq && host && host->card) {
478 // type = mmc_get_rw_type(areq->mrq->cmd->opcode);
479 // if (type == 'X')
480 // return;
481 //
482 // md = mmc_get_drvdata(host->card);
483 //
484 // if (areq && areq->mrq)
485 // {
486 // trace_printk("%d,%d %c %d + %d [%s]\n",
487 // md->disk->major, md->disk->first_minor, type,
488 // areq->mrq->cmd->arg, areq->mrq->data->blocks,
489 // current->comm);
490 // }
491 // }
492 //}
493
494 static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
495 {
496 struct mmc_packed *packed = mqrq->packed;
497
498 BUG_ON(!packed);
499
500 mqrq->cmd_type = MMC_PACKED_NONE;
501 packed->nr_entries = MMC_PACKED_NR_ZERO;
502 packed->idx_failure = MMC_PACKED_NR_IDX;
503 packed->retries = 0;
504 packed->blocks = 0;
505 }
506
507 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
508 {
509 struct mmc_blk_data *md;
510
511 mutex_lock(&open_lock);
512 md = disk->private_data;
513 if (md && md->usage == 0)
514 md = NULL;
515 if (md)
516 md->usage++;
517 mutex_unlock(&open_lock);
518
519 return md;
520 }
521
522 static inline int mmc_get_devidx(struct gendisk *disk)
523 {
524 int devidx = disk->first_minor / perdev_minors;
525 return devidx;
526 }
527
528 static void mmc_blk_put(struct mmc_blk_data *md)
529 {
530 mutex_lock(&open_lock);
531 md->usage--;
532 if (md->usage == 0) {
533 int devidx = mmc_get_devidx(md->disk);
534 blk_cleanup_queue(md->queue.queue);
535
536 __clear_bit(devidx, dev_use);
537
538 put_disk(md->disk);
539 kfree(md);
540 }
541 mutex_unlock(&open_lock);
542 }
543
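/*
 * Power-on write-protect sysfs attribute for the boot partitions: reads
 * back 0 (unlocked), 1 (write-protected until the next power cycle) or
 * 2 (permanently write-protected); writing 1 sets the power-on write
 * protection via EXT_CSD_BOOT_WP.
 */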
544 static ssize_t power_ro_lock_show(struct device *dev,
545 struct device_attribute *attr, char *buf)
546 {
547 int ret;
548 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
549 struct mmc_card *card = md->queue.card;
550 int locked = 0;
551
552 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
553 locked = 2;
554 else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
555 locked = 1;
556
557 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
558
559 return ret;
560 }
561
562 static ssize_t power_ro_lock_store(struct device *dev,
563 struct device_attribute *attr, const char *buf, size_t count)
564 {
565 int ret;
566 struct mmc_blk_data *md, *part_md;
567 struct mmc_card *card;
568 unsigned long set;
569
570 if (kstrtoul(buf, 0, &set))
571 return -EINVAL;
572
573 if (set != 1)
574 return count;
575
576 md = mmc_blk_get(dev_to_disk(dev));
577 card = md->queue.card;
578
579 mmc_claim_host(card->host);
580
581 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
582 card->ext_csd.boot_ro_lock |
583 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
584 card->ext_csd.part_time);
585 if (ret)
586 pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
587 else
588 card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
589
590 mmc_release_host(card->host);
591
592 if (!ret) {
593 pr_info("%s: Locking boot partition ro until next power on\n",
594 md->disk->disk_name);
595 set_disk_ro(md->disk, 1);
596
597 list_for_each_entry(part_md, &md->part, part)
598 if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
599 pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
600 set_disk_ro(part_md->disk, 1);
601 }
602 }
603
604 mmc_blk_put(md);
605 return count;
606 }
607
608 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
609 char *buf)
610 {
611 int ret;
612 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
613
614 ret = snprintf(buf, PAGE_SIZE, "%d\n",
615 get_disk_ro(dev_to_disk(dev)) ^
616 md->read_only);
617 mmc_blk_put(md);
618 return ret;
619 }
620
621 static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
622 const char *buf, size_t count)
623 {
624 int ret;
625 char *end;
626 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
627 unsigned long set = simple_strtoul(buf, &end, 0);
628 if (end == buf) {
629 ret = -EINVAL;
630 goto out;
631 }
632
633 set_disk_ro(dev_to_disk(dev), set || md->read_only);
634 ret = count;
635 out:
636 mmc_blk_put(md);
637 return ret;
638 }
639
640 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
641 {
642 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
643 int ret = -ENXIO;
644
645 mutex_lock(&block_mutex);
646 if (md) {
647 if (md->usage == 2)
648 check_disk_change(bdev);
649 ret = 0;
650
651 if ((mode & FMODE_WRITE) && md->read_only) {
652 mmc_blk_put(md);
653 ret = -EROFS;
654 }
655 }
656 mutex_unlock(&block_mutex);
657
658 return ret;
659 }
660
661 static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
662 {
663 struct mmc_blk_data *md = disk->private_data;
664
665 mutex_lock(&block_mutex);
666 mmc_blk_put(md);
667 mutex_unlock(&block_mutex);
668 }
669
670 static int
671 mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
672 {
673 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
674 geo->heads = 4;
675 geo->sectors = 16;
676 return 0;
677 }
678
679 struct mmc_blk_ioc_data {
680 struct mmc_ioc_cmd ic;
681 unsigned char *buf;
682 u64 buf_bytes;
683 };
684
685 static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
686 struct mmc_ioc_cmd __user *user)
687 {
688 struct mmc_blk_ioc_data *idata;
689 int err;
690
691 idata = kzalloc(sizeof(*idata), GFP_KERNEL);
692 if (!idata) {
693 err = -ENOMEM;
694 goto out;
695 }
696
697 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
698 err = -EFAULT;
699 goto idata_err;
700 }
701
702 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
703 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
704 err = -EOVERFLOW;
705 goto idata_err;
706 }
707
708 if (!idata->buf_bytes)
709 return idata;
710
711 idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
712 if (!idata->buf) {
713 err = -ENOMEM;
714 goto idata_err;
715 }
716
717 if (copy_from_user(idata->buf, (void __user *)(unsigned long)
718 idata->ic.data_ptr, idata->buf_bytes)) {
719 err = -EFAULT;
720 goto copy_err;
721 }
722
723 return idata;
724
725 copy_err:
726 kfree(idata->buf);
727 idata_err:
728 kfree(idata);
729 out:
730 return ERR_PTR(err);
731 }
732
733 static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
734 u32 retries_max)
735 {
736 int err;
737 u32 retry_count = 0;
738
739 if (!status || !retries_max)
740 return -EINVAL;
741
742 do {
743 err = get_card_status(card, status, 5);
744 if (err)
745 break;
746
747 if (!R1_STATUS(*status) &&
748 (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
749 break; /* RPMB programming operation complete */
750
751 /*
752 * Reschedule to give the MMC device a chance to continue
753 * processing the previous command without being polled too
754 * frequently.
755 */
756 usleep_range(1000, 5000);
757 } while (++retry_count < retries_max);
758
759 if (retry_count == retries_max)
760 err = -EPERM;
761
762 return err;
763 }
764
765 static int mmc_blk_ioctl_cmd(struct block_device *bdev,
766 struct mmc_ioc_cmd __user *ic_ptr)
767 {
768 struct mmc_blk_ioc_data *idata;
769 struct mmc_blk_data *md;
770 struct mmc_card *card;
771 struct mmc_command cmd = {0};
772 struct mmc_data data = {0};
773 struct mmc_request mrq = {NULL};
774 struct scatterlist sg;
775 int err;
776 int is_rpmb = false;
777 u32 status = 0;
778
779 /*
780 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
781 * whole block device, not on a partition. This prevents overspray
782 * between sibling partitions.
783 */
784 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
785 return -EPERM;
786
787 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
788 if (IS_ERR(idata))
789 return PTR_ERR(idata);
790
791 md = mmc_blk_get(bdev->bd_disk);
792 if (!md) {
793 err = -EINVAL;
794 goto cmd_err;
795 }
796
797 if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
798 is_rpmb = true;
799
800 card = md->queue.card;
801 if (IS_ERR(card)) {
802 err = PTR_ERR(card);
803 goto cmd_done;
804 }
805
806 cmd.opcode = idata->ic.opcode;
807 cmd.arg = idata->ic.arg;
808 cmd.flags = idata->ic.flags;
809
810 if (idata->buf_bytes) {
811 data.sg = &sg;
812 data.sg_len = 1;
813 data.blksz = idata->ic.blksz;
814 data.blocks = idata->ic.blocks;
815
816 sg_init_one(data.sg, idata->buf, idata->buf_bytes);
817
818 if (idata->ic.write_flag)
819 data.flags = MMC_DATA_WRITE;
820 else
821 data.flags = MMC_DATA_READ;
822
823 /* data.flags must already be set before doing this. */
824 mmc_set_data_timeout(&data, card);
825
826 /* Allow overriding the timeout_ns for empirical tuning. */
827 if (idata->ic.data_timeout_ns)
828 data.timeout_ns = idata->ic.data_timeout_ns;
829
830 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
831 /*
832 * Pretend this is a data transfer and rely on the
833 * host driver to compute timeout. When all host
834 * drivers support cmd.cmd_timeout for R1B, this
835 * can be changed to:
836 *
837 * mrq.data = NULL;
838 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
839 */
840 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
841 }
842
843 mrq.data = &data;
844 }
845
846 mrq.cmd = &cmd;
847
848 mmc_claim_host(card->host);
849
850 err = mmc_blk_part_switch(card, md);
851 if (err)
852 goto cmd_rel_host;
853
854 if (idata->ic.is_acmd) {
855 err = mmc_app_cmd(card->host, card);
856 if (err)
857 goto cmd_rel_host;
858 }
859
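/*
 * RPMB accesses must be framed by CMD23 (SET_BLOCK_COUNT); bit 31 of
 * write_flag requests a reliable write for the programming step.
 */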
860 if (is_rpmb) {
861 err = mmc_set_blockcount(card, data.blocks,
862 idata->ic.write_flag & (1 << 31));
863 if (err)
864 goto cmd_rel_host;
865 }
866
867 mmc_wait_for_req(card->host, &mrq);
868
869 if (cmd.error) {
870 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
871 __func__, cmd.error);
872 err = cmd.error;
873 goto cmd_rel_host;
874 }
875 if (data.error) {
876 dev_err(mmc_dev(card->host), "%s: data error %d\n",
877 __func__, data.error);
878 err = data.error;
879 goto cmd_rel_host;
880 }
881
882 /*
883 * According to the SD specs, some commands require a delay after
884 * issuing the command.
885 */
886 if (idata->ic.postsleep_min_us)
887 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
888
889 if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
890 err = -EFAULT;
891 goto cmd_rel_host;
892 }
893
894 if (!idata->ic.write_flag) {
895 if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
896 idata->buf, idata->buf_bytes)) {
897 err = -EFAULT;
898 goto cmd_rel_host;
899 }
900 }
901
902 if (is_rpmb) {
903 /*
904 * Ensure RPMB command has completed by polling CMD13
905 * "Send Status".
906 */
907 err = ioctl_rpmb_card_status_poll(card, &status, 5);
908 if (err)
909 dev_err(mmc_dev(card->host),
910 "%s: Card Status=0x%08X, error %d\n",
911 __func__, status, err);
912 }
913
914 cmd_rel_host:
915 mmc_release_host(card->host);
916
917 cmd_done:
918 mmc_blk_put(md);
919 cmd_err:
920 kfree(idata->buf);
921 kfree(idata);
922 return err;
923 }
924
925 static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
926 unsigned int cmd, unsigned long arg)
927 {
928 int ret = -EINVAL;
929 if (cmd == MMC_IOC_CMD)
930 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
931 return ret;
932 }
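/*
 * Userspace reaches the path above through the MMC_IOC_CMD ioctl on the
 * whole block device (CAP_SYS_RAWIO required).  A minimal sketch of a
 * caller reading the 512-byte EXT_CSD (CMD8) might look like this; the
 * response-flag constants are not exported in the uapi header, so tools
 * such as mmc-utils carry their own copies of the MMC_RSP_R1 /
 * MMC_CMD_ADTC encoding:
 *
 *	struct mmc_ioc_cmd ic = { 0 };
 *	__u8 ext_csd[512];
 *
 *	ic.opcode = 8;                 // MMC_SEND_EXT_CSD
 *	ic.arg = 0;
 *	ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *	ic.blksz = 512;
 *	ic.blocks = 1;
 *	ic.write_flag = 0;             // read
 *	mmc_ioc_cmd_set_data(ic, ext_csd);
 *	if (ioctl(fd, MMC_IOC_CMD, &ic) < 0)  // fd = open("/dev/mmcblk0", O_RDWR)
 *		perror("MMC_IOC_CMD");
 */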
933
934 #ifdef CONFIG_COMPAT
935 static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
936 unsigned int cmd, unsigned long arg)
937 {
938 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
939 }
940 #endif
941
942 static const struct block_device_operations mmc_bdops = {
943 .open = mmc_blk_open,
944 .release = mmc_blk_release,
945 .getgeo = mmc_blk_getgeo,
946 .owner = THIS_MODULE,
947 .ioctl = mmc_blk_ioctl,
948 #ifdef CONFIG_COMPAT
949 .compat_ioctl = mmc_blk_compat_ioctl,
950 #endif
951 };
952
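/*
 * Switch the card's active physical partition (user area, boot or RPMB)
 * by rewriting the access bits of EXT_CSD PARTITION_CONFIG, but only
 * when the requested partition differs from the currently selected one.
 */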
953 static inline int mmc_blk_part_switch(struct mmc_card *card,
954 struct mmc_blk_data *md)
955 {
956 int ret;
957 struct mmc_blk_data *main_md = mmc_get_drvdata(card);
958
959 if (main_md->part_curr == md->part_type)
960 return 0;
961
962 if (mmc_card_mmc(card)) {
963 u8 part_config = card->ext_csd.part_config;
964
965 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
966 part_config |= md->part_type;
967
968 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
969 EXT_CSD_PART_CONFIG, part_config,
970 card->ext_csd.part_time);
971 if (ret)
972 return ret;
973
974 card->ext_csd.part_config = part_config;
975 }
976
977 main_md->part_curr = md->part_type;
978 return 0;
979 }
980
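/*
 * Ask an SD card how many blocks of the last write completed
 * successfully: APP_CMD followed by ACMD22 (SEND_NUM_WR_BLKS), which
 * returns a single big-endian 32-bit count as a 4-byte data transfer.
 */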
981 static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
982 {
983 int err;
984 u32 result;
985 __be32 *blocks;
986
987 struct mmc_request mrq = {NULL};
988 struct mmc_command cmd = {0};
989 struct mmc_data data = {0};
990
991 struct scatterlist sg;
992
993 cmd.opcode = MMC_APP_CMD;
994 cmd.arg = card->rca << 16;
995 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
996
997 err = mmc_wait_for_cmd(card->host, &cmd, 0);
998 if (err)
999 return (u32)-1;
1000 if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
1001 return (u32)-1;
1002
1003 memset(&cmd, 0, sizeof(struct mmc_command));
1004
1005 cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
1006 cmd.arg = 0;
1007 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1008
1009 data.blksz = 4;
1010 data.blocks = 1;
1011 data.flags = MMC_DATA_READ;
1012 data.sg = &sg;
1013 data.sg_len = 1;
1014 mmc_set_data_timeout(&data, card);
1015
1016 mrq.cmd = &cmd;
1017 mrq.data = &data;
1018
1019 blocks = kmalloc(4, GFP_KERNEL);
1020 if (!blocks)
1021 return (u32)-1;
1022
1023 sg_init_one(&sg, blocks, 4);
1024
1025 mmc_wait_for_req(card->host, &mrq);
1026
1027 result = ntohl(*blocks);
1028 kfree(blocks);
1029
1030 if (cmd.error || data.error)
1031 result = (u32)-1;
1032
1033 return result;
1034 }
1035
1036 u32 __mmc_sd_num_wr_blocks(struct mmc_card *card)
1037 {
1038 return mmc_sd_num_wr_blocks(card);
1039 }
1040 EXPORT_SYMBOL(__mmc_sd_num_wr_blocks);
1041
1042 static int send_stop(struct mmc_card *card, u32 *status)
1043 {
1044 struct mmc_command cmd = {0};
1045 int err;
1046
1047 cmd.opcode = MMC_STOP_TRANSMISSION;
1048 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1049 err = mmc_wait_for_cmd(card->host, &cmd, 5);
1050 if (err == 0)
1051 *status = cmd.resp[0];
1052 return err;
1053 }
1054
1055 static int get_card_status(struct mmc_card *card, u32 *status, int retries)
1056 {
1057 struct mmc_command cmd = {0};
1058 int err;
1059
1060 cmd.opcode = MMC_SEND_STATUS;
1061 if (!mmc_host_is_spi(card->host))
1062 cmd.arg = card->rca << 16;
1063 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
1064 err = mmc_wait_for_cmd(card->host, &cmd, retries);
1065 if (err == 0)
1066 *status = cmd.resp[0];
1067 return err;
1068 }
1069
1070 #define ERR_NOMEDIUM 3
1071 #define ERR_RETRY 2
1072 #define ERR_ABORT 1
1073 #define ERR_CONTINUE 0
1074
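/*
 * Map a failed r/w or SET_BLOCK_COUNT command (plus the card status,
 * when it could be read) to ERR_RETRY or ERR_ABORT for the recovery
 * path below.
 */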
1075 static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
1076 bool status_valid, u32 status)
1077 {
1078 switch (error) {
1079 case -EILSEQ:
1080 /* response crc error, retry the r/w cmd */
1081 pr_err("%s: %s sending %s command, card status %#x\n",
1082 req->rq_disk->disk_name, "response CRC error",
1083 name, status);
1084 return ERR_RETRY;
1085
1086 case -ETIMEDOUT:
1087 pr_err("%s: %s sending %s command, card status %#x\n",
1088 req->rq_disk->disk_name, "timed out", name, status);
1089
1090 /* If the status cmd initially failed, retry the r/w cmd */
1091 if (!status_valid) {
1092 pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
1093 return ERR_RETRY;
1094 }
1095 /*
1096 * If it was a r/w cmd crc error, or illegal command
1097 * (eg, issued in wrong state) then retry - we should
1098 * have corrected the state problem above.
1099 */
1100 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
1101 pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
1102 return ERR_RETRY;
1103 }
1104
1105 /* Otherwise abort the command */
1106 pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
1107 return ERR_ABORT;
1108
1109 default:
1110 /* We don't understand the error code the driver gave us */
1111 pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
1112 req->rq_disk->disk_name, error, status);
1113 return ERR_ABORT;
1114 }
1115 }
1116
1117 /*
1118 * Initial r/w and stop cmd error recovery.
1119 * We don't know whether the card received the r/w cmd or not, so try to
1120 * restore things back to a sane state. Essentially, we do this as follows:
1121 * - Obtain card status. If the first attempt to obtain card status fails,
1122 * the status word will reflect the failed status cmd, not the failed
1123 * r/w cmd. If we fail to obtain card status, it suggests we can no
1124 * longer communicate with the card.
1125 * - Check the card state. If the card received the cmd but there was a
1126 * transient problem with the response, it might still be in a data transfer
1127 * mode. Try to send it a stop command. If this fails, we can't recover.
1128 * - If the r/w cmd failed due to a response CRC error, it was probably
1129 * transient, so retry the cmd.
1130 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
1131 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
1132 * illegal cmd, retry.
1133 * Otherwise we don't understand what happened, so abort.
1134 */
1135 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
1136 struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
1137 {
1138 bool prev_cmd_status_valid = true;
1139 u32 status, stop_status = 0;
1140 int err, retry;
1141
1142 if (mmc_card_removed(card))
1143 return ERR_NOMEDIUM;
1144
1145 /*
1146 * Try to get card status which indicates both the card state
1147 * and why there was no response. If the first attempt fails,
1148 * we can't be sure the returned status is for the r/w command.
1149 */
1150 for (retry = 2; retry >= 0; retry--) {
1151 err = get_card_status(card, &status, 0);
1152 if (!err)
1153 break;
1154
1155 prev_cmd_status_valid = false;
1156 pr_err("%s: error %d sending status command, %sing\n",
1157 req->rq_disk->disk_name, err, retry ? "retry" : "abort");
1158 }
1159
1160 /* We couldn't get a response from the card. Give up. */
1161 if (err) {
1162 /* Check if the card is removed */
1163 if (mmc_detect_card_removed(card->host))
1164 return ERR_NOMEDIUM;
1165 return ERR_ABORT;
1166 }
1167
1168 /* Flag ECC errors */
1169 if ((status & R1_CARD_ECC_FAILED) ||
1170 (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
1171 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
1172 *ecc_err = 1;
1173
1174 /* Flag General errors */
1175 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1176 if ((status & R1_ERROR) ||
1177 (brq->stop.resp[0] & R1_ERROR)) {
1178 pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
1179 req->rq_disk->disk_name, __func__,
1180 brq->stop.resp[0], status);
1181 *gen_err = 1;
1182 }
1183
1184 /*
1185 * Check the current card state. If it is in some data transfer
1186 * mode, tell it to stop (and hopefully transition back to TRAN.)
1187 */
1188 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
1189 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
1190 err = send_stop(card, &stop_status);
1191 if (err) {
1192 get_card_status(card, &status, 0);
1193 if ((R1_CURRENT_STATE(status) == R1_STATE_TRAN) ||
1194 (R1_CURRENT_STATE(status) == R1_STATE_PRG)) {
1195 /* Card already returned to tran/prg state; treat as recovered. */
1196 err = 0;
1197 stop_status = 0;
1198 pr_err("%s: stop failed but card is back in tran/prg state, status %#x\n", req->rq_disk->disk_name, status);
1199 } else
1200 pr_err("%s: stop failed, card status %#x\n", req->rq_disk->disk_name, status);
1201 }
1202 if (err)
1203 pr_err("%s: error %d sending stop command\n",
1204 req->rq_disk->disk_name, err);
1205
1206 /*
1207 * If the stop cmd also timed out, the card is probably
1208 * not present, so abort. Other errors are bad news too.
1209 */
1210 if (err)
1211 return ERR_ABORT;
1212 if (stop_status & R1_CARD_ECC_FAILED)
1213 *ecc_err = 1;
1214 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
1215 if (stop_status & R1_ERROR) {
1216 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1217 req->rq_disk->disk_name, __func__,
1218 stop_status);
1219 *gen_err = 1;
1220 }
1221 }
1222
1223 /* Check for set block count errors */
1224 if (brq->sbc.error)
1225 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
1226 prev_cmd_status_valid, status);
1227
1228 /* Check for r/w command errors */
1229 if (brq->cmd.error)
1230 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
1231 prev_cmd_status_valid, status);
1232
1233 /* Data errors */
1234 if (!brq->stop.error)
1235 return ERR_CONTINUE;
1236
1237 /* Now for stop errors. These aren't fatal to the transfer. */
1238 pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
1239 req->rq_disk->disk_name, brq->stop.error,
1240 brq->cmd.resp[0], status);
1241
1242 /*
1243 * Substitute in our own stop status as this will give the error
1244 * state which happened during the execution of the r/w command.
1245 */
1246 if (stop_status) {
1247 brq->stop.resp[0] = stop_status;
1248 brq->stop.error = 0;
1249 }
1250 return ERR_CONTINUE;
1251 }
1252
1253 static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
1254 int type)
1255 {
1256 int err;
1257
1258 if (md->reset_done & type)
1259 return -EEXIST;
1260
1261 md->reset_done |= type;
1262 err = mmc_hw_reset(host);
1263 /* Ensure we switch back to the correct partition */
1264 if (err != -EOPNOTSUPP) {
1265 struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
1266 int part_err;
1267
1268 main_md->part_curr = main_md->part_type;
1269 part_err = mmc_blk_part_switch(host->card, md);
1270 if (part_err) {
1271 /*
1272 * We have failed to get back into the correct
1273 * partition, so we need to abort the whole request.
1274 */
1275 return -ENODEV;
1276 }
1277 }
1278 return err;
1279 }
1280
1281 static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1282 {
1283 md->reset_done &= ~type;
1284 }
1285
1286 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1287 {
1288 struct mmc_blk_data *md = mq->data;
1289 struct mmc_card *card = md->queue.card;
1290 unsigned int from, nr, arg;
1291 int err = 0, type = MMC_BLK_DISCARD;
1292
1293 if (!mmc_can_erase(card)) {
1294 err = -EOPNOTSUPP;
1295 goto out;
1296 }
1297
1298 from = blk_rq_pos(req);
1299 nr = blk_rq_sectors(req);
1300
1301 if (mmc_can_discard(card))
1302 arg = MMC_DISCARD_ARG;
1303 else if (mmc_can_trim(card))
1304 arg = MMC_TRIM_ARG;
1305 else
1306 arg = MMC_ERASE_ARG;
1307 retry:
1308 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1309 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1310 INAND_CMD38_ARG_EXT_CSD,
1311 arg == MMC_TRIM_ARG ?
1312 INAND_CMD38_ARG_TRIM :
1313 INAND_CMD38_ARG_ERASE,
1314 0);
1315 if (err)
1316 goto out;
1317 }
1318 err = mmc_erase(card, from, nr, arg);
1319 out:
1320 if (err == -EIO && !mmc_blk_reset(md, card->host, type))
1321 goto retry;
1322 if (!err)
1323 mmc_blk_reset_success(md, type);
1324 blk_end_request(req, err, blk_rq_bytes(req));
1325
1326 return err ? 0 : 1;
1327 }
1328
1329 static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
1330 struct request *req)
1331 {
1332 struct mmc_blk_data *md = mq->data;
1333 struct mmc_card *card = md->queue.card;
1334 unsigned int from, nr, arg, trim_arg, erase_arg;
1335 int err = 0, type = MMC_BLK_SECDISCARD;
1336
1337 if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
1338 err = -EOPNOTSUPP;
1339 goto out;
1340 }
1341
1342 from = blk_rq_pos(req);
1343 nr = blk_rq_sectors(req);
1344
1345 /* The sanitize operation is supported only from eMMC v4.5 onward */
1346 if (mmc_can_sanitize(card)) {
1347 erase_arg = MMC_ERASE_ARG;
1348 trim_arg = MMC_TRIM_ARG;
1349 } else {
1350 erase_arg = MMC_SECURE_ERASE_ARG;
1351 trim_arg = MMC_SECURE_TRIM1_ARG;
1352 }
1353
1354 if (mmc_erase_group_aligned(card, from, nr))
1355 arg = erase_arg;
1356 else if (mmc_can_trim(card))
1357 arg = trim_arg;
1358 else {
1359 err = -EINVAL;
1360 goto out;
1361 }
1362 retry:
1363 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1364 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1365 INAND_CMD38_ARG_EXT_CSD,
1366 arg == MMC_SECURE_TRIM1_ARG ?
1367 INAND_CMD38_ARG_SECTRIM1 :
1368 INAND_CMD38_ARG_SECERASE,
1369 0);
1370 if (err)
1371 goto out_retry;
1372 }
1373
1374 err = mmc_erase(card, from, nr, arg);
1375 if (err == -EIO)
1376 goto out_retry;
1377 if (err)
1378 goto out;
1379
1380 if (arg == MMC_SECURE_TRIM1_ARG) {
1381 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
1382 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1383 INAND_CMD38_ARG_EXT_CSD,
1384 INAND_CMD38_ARG_SECTRIM2,
1385 0);
1386 if (err)
1387 goto out_retry;
1388 }
1389
1390 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
1391 if (err == -EIO)
1392 goto out_retry;
1393 if (err)
1394 goto out;
1395 }
1396
1397 if (mmc_can_sanitize(card)) {
1398 trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
1399 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1400 EXT_CSD_SANITIZE_START, 1, 0);
1401 trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
1402 }
1403 out_retry:
1404 if (err && !mmc_blk_reset(md, card->host, type))
1405 goto retry;
1406 if (!err)
1407 mmc_blk_reset_success(md, type);
1408 out:
1409 blk_end_request(req, err, blk_rq_bytes(req));
1410
1411 return err ? 0 : 1;
1412 }
1413
1414 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
1415 {
1416 struct mmc_blk_data *md = mq->data;
1417 struct mmc_card *card = md->queue.card;
1418 int ret = 0;
1419
1420 ret = mmc_flush_cache(card);
1421 if (ret)
1422 ret = -EIO;
1423
1424 blk_end_request_all(req, ret);
1425
1426 return ret ? 0 : 1;
1427 }
1428
1429 /*
1430 * Reformat current write as a reliable write, supporting
1431 * both legacy and the enhanced reliable write MMC cards.
1432 * In each transfer we'll handle only as much as a single
1433 * reliable write can handle, thus finish the request in
1434 * partial completions.
1435 */
1436 static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
1437 struct mmc_card *card,
1438 struct request *req)
1439 {
1440 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
1441 /* Legacy mode imposes restrictions on transfers. */
1442 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
1443 brq->data.blocks = 1;
1444
1445 if (brq->data.blocks > card->ext_csd.rel_sectors)
1446 brq->data.blocks = card->ext_csd.rel_sectors;
1447 else if (brq->data.blocks < card->ext_csd.rel_sectors)
1448 brq->data.blocks = 1;
1449 }
1450 }
1451
1452 #define CMD_ERRORS \
1453 (R1_OUT_OF_RANGE | /* Command argument out of range */ \
1454 R1_ADDRESS_ERROR | /* Misaligned address */ \
1455 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
1456 R1_WP_VIOLATION | /* Tried to write to protected block */ \
1457 R1_CC_ERROR | /* Card controller error */ \
1458 R1_ERROR) /* General/unknown error */
1459
1460 static int mmc_blk_err_check(struct mmc_card *card,
1461 struct mmc_async_req *areq)
1462 {
1463 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
1464 mmc_active);
1465 struct mmc_blk_request *brq = &mq_mrq->brq;
1466 struct request *req = mq_mrq->req;
1467 int ecc_err = 0, gen_err = 0;
1468
1469 /*
1470 * sbc.error indicates a problem with the set block count
1471 * command. No data will have been transferred.
1472 *
1473 * cmd.error indicates a problem with the r/w command. No
1474 * data will have been transferred.
1475 *
1476 * stop.error indicates a problem with the stop command. Data
1477 * may have been transferred, or may still be transferring.
1478 */
1479 if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1480 brq->data.error) {
1481 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
1482 case ERR_RETRY:
1483 return MMC_BLK_RETRY;
1484 case ERR_ABORT:
1485 return MMC_BLK_ABORT;
1486 case ERR_NOMEDIUM:
1487 return MMC_BLK_NOMEDIUM;
1488 case ERR_CONTINUE:
1489 break;
1490 }
1491 }
1492
1493 /*
1494 * Check for errors relating to the execution of the
1495 * initial command - such as address errors. No data
1496 * has been transferred.
1497 */
1498 if (brq->cmd.resp[0] & CMD_ERRORS) {
1499 pr_err("%s: r/w command failed, status = %#x\n",
1500 req->rq_disk->disk_name, brq->cmd.resp[0]);
1501 return MMC_BLK_ABORT;
1502 }
1503
1504 /*
1505 * Everything else is either success, or a data error of some
1506 * kind. If it was a write, we may have transitioned to
1507 * program mode, which we have to wait for to complete.
1508 */
1509 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1510 u32 status;
1511 unsigned long timeout;
1512
1513 /* Check stop command response */
1514 if (brq->stop.resp[0] & R1_ERROR) {
1515 pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
1516 req->rq_disk->disk_name, __func__,
1517 brq->stop.resp[0]);
1518 gen_err = 1;
1519 }
1520
1521 timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
1522 do {
1523 int err = get_card_status(card, &status, 5);
1524 if (err) {
1525 pr_err("%s: error %d requesting status\n",
1526 req->rq_disk->disk_name, err);
1527 return MMC_BLK_CMD_ERR;
1528 }
1529
1530 if (status & R1_ERROR) {
1531 pr_err("%s: %s: general error sending status command, card status %#x\n",
1532 req->rq_disk->disk_name, __func__,
1533 status);
1534 gen_err = 1;
1535 }
1536
1537 /* Timeout if the device never becomes ready for data
1538 * and never leaves the program state.
1539 */
1540 if (time_after(jiffies, timeout)) {
1541 pr_err("%s: Card stuck in programming state!"\
1542 " %s %s\n", mmc_hostname(card->host),
1543 req->rq_disk->disk_name, __func__);
1544
1545 return MMC_BLK_CMD_ERR;
1546 }
1547 /*
1548 * Some cards mishandle the status bits,
1549 * so make sure to check both the busy
1550 * indication and the card state.
1551 */
1552 } while (!(status & R1_READY_FOR_DATA) ||
1553 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
1554 }
1555
1556 /* if general error occurs, retry the write operation. */
1557 if (gen_err) {
1558 pr_warn("%s: retrying write for general error\n",
1559 req->rq_disk->disk_name);
1560 return MMC_BLK_RETRY;
1561 }
1562
1563 if (brq->data.error) {
1564 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1565 req->rq_disk->disk_name, brq->data.error,
1566 (unsigned)blk_rq_pos(req),
1567 (unsigned)blk_rq_sectors(req),
1568 brq->cmd.resp[0], brq->stop.resp[0]);
1569
1570 if (rq_data_dir(req) == READ) {
1571 if (ecc_err)
1572 return MMC_BLK_ECC_ERR;
1573 return MMC_BLK_DATA_ERR;
1574 } else {
1575 return MMC_BLK_CMD_ERR;
1576 }
1577 }
1578
1579 if (!brq->data.bytes_xfered)
1580 return MMC_BLK_RETRY;
1581
1582 if (mmc_packed_cmd(mq_mrq->cmd_type)) {
1583 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
1584 return MMC_BLK_PARTIAL;
1585 else
1586 return MMC_BLK_SUCCESS;
1587 }
1588
1589 if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1590 return MMC_BLK_PARTIAL;
1591
1592 return MMC_BLK_SUCCESS;
1593 }
1594
1595 static int mmc_blk_packed_err_check(struct mmc_card *card,
1596 struct mmc_async_req *areq)
1597 {
1598 struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
1599 mmc_active);
1600 struct request *req = mq_rq->req;
1601 struct mmc_packed *packed = mq_rq->packed;
1602 int err, check, status;
1603 u8 *ext_csd;
1604
1605 BUG_ON(!packed);
1606
1607 packed->retries--;
1608 check = mmc_blk_err_check(card, areq);
1609 err = get_card_status(card, &status, 0);
1610 if (err) {
1611 pr_err("%s: error %d sending status command\n",
1612 req->rq_disk->disk_name, err);
1613 return MMC_BLK_ABORT;
1614 }
1615
1616 if (status & R1_EXCEPTION_EVENT) {
1617 ext_csd = kzalloc(512, GFP_KERNEL);
1618 if (!ext_csd) {
1619 pr_err("%s: unable to allocate buffer for ext_csd\n",
1620 req->rq_disk->disk_name);
1621 return -ENOMEM;
1622 }
1623
1624 err = mmc_send_ext_csd(card, ext_csd);
1625 if (err) {
1626 pr_err("%s: error %d sending ext_csd\n",
1627 req->rq_disk->disk_name, err);
1628 check = MMC_BLK_ABORT;
1629 goto free;
1630 }
1631
1632 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
1633 EXT_CSD_PACKED_FAILURE) &&
1634 (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1635 EXT_CSD_PACKED_GENERIC_ERROR)) {
1636 if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1637 EXT_CSD_PACKED_INDEXED_ERROR) {
1638 packed->idx_failure =
1639 ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
1640 check = MMC_BLK_PARTIAL;
1641 }
1642 pr_err("%s: packed cmd failed, nr %u, sectors %u, "
1643 "failure index: %d\n",
1644 req->rq_disk->disk_name, packed->nr_entries,
1645 packed->blocks, packed->idx_failure);
1646 }
1647 free:
1648 kfree(ext_csd);
1649 }
1650
1651 return check;
1652 }
1653
1654 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1655 struct mmc_card *card,
1656 int disable_multi,
1657 struct mmc_queue *mq)
1658 {
1659 u32 readcmd, writecmd;
1660 struct mmc_blk_request *brq = &mqrq->brq;
1661 struct request *req = mqrq->req;
1662 struct mmc_blk_data *md = mq->data;
1663 bool do_data_tag;
1664
1665 /*
1666 * Reliable writes are used to implement Forced Unit Access and
1667 * REQ_META accesses, and are supported only on MMCs.
1668 *
1669 * XXX: this really needs a good explanation of why REQ_META
1670 * is treated special.
1671 */
1672 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
1673 (req->cmd_flags & REQ_META)) &&
1674 (rq_data_dir(req) == WRITE) &&
1675 (md->flags & MMC_BLK_REL_WR);
1676
1677 memset(brq, 0, sizeof(struct mmc_blk_request));
1678 brq->mrq.cmd = &brq->cmd;
1679 brq->mrq.data = &brq->data;
1680
1681 brq->cmd.arg = blk_rq_pos(req);
1682 if (!mmc_card_blockaddr(card))
1683 brq->cmd.arg <<= 9;
1684 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1685 brq->data.blksz = 512;
1686 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1687 brq->stop.arg = 0;
1688 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1689 brq->data.blocks = blk_rq_sectors(req);
1690
1691 /*
1692 * The block layer doesn't support all sector count
1693 * restrictions, so we need to be prepared for too big
1694 * requests.
1695 */
1696 if (brq->data.blocks > card->host->max_blk_count)
1697 brq->data.blocks = card->host->max_blk_count;
1698
1699 if (brq->data.blocks > 1) {
1700 /*
1701 * After a read error, we redo the request one sector
1702 * at a time in order to accurately determine which
1703 * sectors can be read successfully.
1704 */
1705 if (disable_multi)
1706 brq->data.blocks = 1;
1707
1708 /* Some controllers can't do multiblock reads due to hw bugs */
1709 if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
1710 rq_data_dir(req) == READ)
1711 brq->data.blocks = 1;
1712 }
1713
1714 if (brq->data.blocks > 1 || do_rel_wr) {
1715 /* SPI multiblock writes terminate using a special
1716 * token, not a STOP_TRANSMISSION request.
1717 */
1718 if (!mmc_host_is_spi(card->host) ||
1719 rq_data_dir(req) == READ)
1720 brq->mrq.stop = &brq->stop;
1721 readcmd = MMC_READ_MULTIPLE_BLOCK;
1722 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1723 } else {
1724 brq->mrq.stop = NULL;
1725 readcmd = MMC_READ_SINGLE_BLOCK;
1726 writecmd = MMC_WRITE_BLOCK;
1727 }
1728 #ifdef CONFIG_MTK_EMMC_CACHE
1729 /* For non-cacheable system data, the reliable write / forced
1730 * programming write implementation must be issued with the
1731 * multi-block write command.
1732 */
1733 if (mmc_card_mmc(card) && (card->ext_csd.cache_ctrl & 0x1)){
1734 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1735 }
1736 #endif
1737 if (rq_data_dir(req) == READ) {
1738 brq->cmd.opcode = readcmd;
1739 brq->data.flags |= MMC_DATA_READ;
1740 } else {
1741 brq->cmd.opcode = writecmd;
1742 brq->data.flags |= MMC_DATA_WRITE;
1743 }
1744
1745 if (do_rel_wr)
1746 mmc_apply_rel_rw(brq, card, req);
1747
1748 /*
1749 * Data tag is used only during writing meta data to speed
1750 * up write and any subsequent read of this meta data
1751 */
1752 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1753 (req->cmd_flags & REQ_META) &&
1754 (rq_data_dir(req) == WRITE) &&
1755 ((brq->data.blocks * brq->data.blksz) >=
1756 card->ext_csd.data_tag_unit_size);
1757
1758 /*
1759 * Pre-defined multi-block transfers are preferable to
1760 * open ended-ones (and necessary for reliable writes).
1761 * However, it is not sufficient to just send CMD23,
1762 * and avoid the final CMD12, as on an error condition
1763 * CMD12 (stop) needs to be sent anyway. This, coupled
1764 * with Auto-CMD23 enhancements provided by some
1765 * hosts, means that the complexity of dealing
1766 * with this is best left to the host. If CMD23 is
1767 * supported by card and host, we'll fill sbc in and let
1768 * the host deal with handling it correctly. This means
1769 * that for hosts that don't expose MMC_CAP_CMD23, no
1770 * change of behavior will be observed.
1771 *
1772 * N.B.: Some MMC cards experience performance degradation.
1773 * We'll avoid using CMD23-bounded multiblock writes for
1774 * these, while retaining features like reliable writes.
1775 */
1776 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
1777 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
1778 do_data_tag)) {
1779 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1780 brq->sbc.arg = brq->data.blocks |
1781 (do_rel_wr ? (1 << 31) : 0) |
1782 (do_data_tag ? (1 << 29) : 0);
1783 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1784 brq->mrq.sbc = &brq->sbc;
1785 }
1786
1787 mmc_set_data_timeout(&brq->data, card);
1788
1789 brq->data.sg = mqrq->sg;
1790 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1791
1792 if (brq->data.sg_len > 1024)
1793 pr_err("%s:%d sglen = %x\n", __func__, __LINE__, brq->data.sg_len);
1794
1795 /*
1796 * Adjust the sg list so it is the same size as the
1797 * request.
1798 */
1799 if (brq->data.blocks != blk_rq_sectors(req)) {
1800 int i, data_size = brq->data.blocks << 9;
1801 struct scatterlist *sg;
1802
1803 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1804 data_size -= sg->length;
1805 if (data_size <= 0) {
1806 sg->length += data_size;
1807 i++;
1808 break;
1809 }
1810 }
1811 brq->data.sg_len = i;
1812 pr_err("%s:%d sglen = %x\n", __func__, __LINE__, brq->data.sg_len);
1813 }
1814
1815 mqrq->mmc_active.mrq = &brq->mrq;
1816 mqrq->mmc_active.err_check = mmc_blk_err_check;
1817
1818 mmc_queue_bounce_pre(mqrq);
1819 }
1820
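/*
 * A packed command header occupies one "large" sector (4KB) or 512
 * bytes; compute how many scatterlist segments that header needs given
 * the queue's maximum segment size.
 */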
1821 static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
1822 struct mmc_card *card)
1823 {
1824 unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
1825 unsigned int max_seg_sz = queue_max_segment_size(q);
1826 unsigned int len, nr_segs = 0;
1827
1828 do {
1829 len = min(hdr_sz, max_seg_sz);
1830 hdr_sz -= len;
1831 nr_segs++;
1832 } while (hdr_sz);
1833
1834 return nr_segs;
1835 }
1836
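/*
 * Try to coalesce the current write with following queued writes into a
 * single packed command.  Fetching stops at the first request that
 * cannot be packed (different direction, discard/flush, alignment,
 * size or segment limits); that request is put back on the queue.
 * Returns the number of packed entries, or 0 to fall back to a normal
 * transfer.
 */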
1837 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1838 {
1839 struct request_queue *q = mq->queue;
1840 struct mmc_card *card = mq->card;
1841 struct request *cur = req, *next = NULL;
1842 struct mmc_blk_data *md = mq->data;
1843 struct mmc_queue_req *mqrq = mq->mqrq_cur;
1844 bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
1845 unsigned int req_sectors = 0, phys_segments = 0;
1846 unsigned int max_blk_count, max_phys_segs;
1847 bool put_back = true;
1848 u8 max_packed_rw = 0;
1849 u8 reqs = 0;
1850
1851 if (!(md->flags & MMC_BLK_PACKED_CMD))
1852 goto no_packed;
1853
1854 if ((rq_data_dir(cur) == WRITE) &&
1855 mmc_host_packed_wr(card->host))
1856 max_packed_rw = card->ext_csd.max_packed_writes;
1857
1858 if (max_packed_rw == 0)
1859 goto no_packed;
1860
1861 if (mmc_req_rel_wr(cur) &&
1862 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1863 goto no_packed;
1864
1865 if (mmc_large_sector(card) &&
1866 !IS_ALIGNED(blk_rq_sectors(cur), 8))
1867 goto no_packed;
1868
1869 mmc_blk_clear_packed(mqrq);
1870
1871 max_blk_count = min(card->host->max_blk_count,
1872 card->host->max_req_size >> 9);
1873 if (unlikely(max_blk_count > 0xffff))
1874 max_blk_count = 0xffff;
1875
1876 max_phys_segs = queue_max_segments(q);
1877 req_sectors += blk_rq_sectors(cur);
1878 phys_segments += cur->nr_phys_segments;
1879
1880 if (rq_data_dir(cur) == WRITE) {
1881 req_sectors += mmc_large_sector(card) ? 8 : 1;
1882 phys_segments += mmc_calc_packed_hdr_segs(q, card);
1883 }
1884
1885 do {
1886 if (reqs >= max_packed_rw - 1) {
1887 put_back = false;
1888 break;
1889 }
1890
1891 spin_lock_irq(q->queue_lock);
1892 next = blk_fetch_request(q);
1893 spin_unlock_irq(q->queue_lock);
1894 if (!next) {
1895 put_back = false;
1896 break;
1897 }
1898
1899 if (mmc_large_sector(card) &&
1900 !IS_ALIGNED(blk_rq_sectors(next), 8))
1901 break;
1902
1903 if (next->cmd_flags & REQ_DISCARD ||
1904 next->cmd_flags & REQ_FLUSH)
1905 break;
1906
1907 if (rq_data_dir(cur) != rq_data_dir(next))
1908 break;
1909
1910 if (mmc_req_rel_wr(next) &&
1911 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
1912 break;
1913
1914 req_sectors += blk_rq_sectors(next);
1915 if (req_sectors > max_blk_count)
1916 break;
1917
1918 phys_segments += next->nr_phys_segments;
1919 if (phys_segments > max_phys_segs)
1920 break;
1921
1922 list_add_tail(&next->queuelist, &mqrq->packed->list);
1923 cur = next;
1924 reqs++;
1925 } while (1);
1926
1927 if (put_back) {
1928 spin_lock_irq(q->queue_lock);
1929 blk_requeue_request(q, next);
1930 spin_unlock_irq(q->queue_lock);
1931 }
1932
1933 if (reqs > 0) {
1934 list_add(&req->queuelist, &mqrq->packed->list);
1935 mqrq->packed->nr_entries = ++reqs;
1936 mqrq->packed->retries = reqs;
1937 return reqs;
1938 }
1939
1940 no_packed:
1941 mqrq->cmd_type = MMC_PACKED_NONE;
1942 return 0;
1943 }
1944
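/*
 * Build the packed WRITE header: word 0 carries the version, the
 * packed-write indicator and the number of entries, followed by one
 * (CMD23 argument, CMD25 start address) pair per packed request.  The
 * header itself is sent as the first hdr_blocks sectors of the data
 * transfer.
 */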
1945 static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1946 struct mmc_card *card,
1947 struct mmc_queue *mq)
1948 {
1949 struct mmc_blk_request *brq = &mqrq->brq;
1950 struct request *req = mqrq->req;
1951 struct request *prq;
1952 struct mmc_blk_data *md = mq->data;
1953 struct mmc_packed *packed = mqrq->packed;
1954 bool do_rel_wr, do_data_tag;
1955 u32 *packed_cmd_hdr;
1956 u8 hdr_blocks;
1957 u8 i = 1;
1958
1959 BUG_ON(!packed);
1960
1961 mqrq->cmd_type = MMC_PACKED_WRITE;
1962 packed->blocks = 0;
1963 packed->idx_failure = MMC_PACKED_NR_IDX;
1964
1965 packed_cmd_hdr = packed->cmd_hdr;
1966 memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
1967 packed_cmd_hdr[0] = (packed->nr_entries << 16) |
1968 (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
1969 hdr_blocks = mmc_large_sector(card) ? 8 : 1;
1970
1971 /*
1972 * Argument for each entry of packed group
1973 */
1974 list_for_each_entry(prq, &packed->list, queuelist) {
1975 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1976 do_data_tag = (card->ext_csd.data_tag_unit_size) &&
1977 (prq->cmd_flags & REQ_META) &&
1978 (rq_data_dir(prq) == WRITE) &&
1979 ((brq->data.blocks * brq->data.blksz) >=
1980 card->ext_csd.data_tag_unit_size);
1981 /* Argument of CMD23 */
1982 packed_cmd_hdr[(i * 2)] =
1983 (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1984 (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
1985 blk_rq_sectors(prq);
1986 /* Argument of CMD18 or CMD25 */
1987 packed_cmd_hdr[((i * 2)) + 1] =
1988 mmc_card_blockaddr(card) ?
1989 blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
1990 packed->blocks += blk_rq_sectors(prq);
1991 i++;
1992 }
1993
1994 memset(brq, 0, sizeof(struct mmc_blk_request));
1995 brq->mrq.cmd = &brq->cmd;
1996 brq->mrq.data = &brq->data;
1997 brq->mrq.sbc = &brq->sbc;
1998 brq->mrq.stop = &brq->stop;
1999
2000 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
2001 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
2002 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
2003
2004 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
2005 brq->cmd.arg = blk_rq_pos(req);
2006 if (!mmc_card_blockaddr(card))
2007 brq->cmd.arg <<= 9;
2008 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
2009
2010 brq->data.blksz = 512;
2011 brq->data.blocks = packed->blocks + hdr_blocks;
2012 brq->data.flags |= MMC_DATA_WRITE;
2013
2014 brq->stop.opcode = MMC_STOP_TRANSMISSION;
2015 brq->stop.arg = 0;
2016 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2017
2018 mmc_set_data_timeout(&brq->data, card);
2019
2020 brq->data.sg = mqrq->sg;
2021 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
2022 pr_err("%s: sglen = %d\n", __func__, brq->data.sg_len);
2023
2024 mqrq->mmc_active.mrq = &brq->mrq;
2025 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
2026
2027 mmc_queue_bounce_pre(mqrq);
2028 }
2029
2030 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
2031 struct mmc_blk_request *brq, struct request *req,
2032 int ret)
2033 {
2034 struct mmc_queue_req *mq_rq;
2035 mq_rq = container_of(brq, struct mmc_queue_req, brq);
2036
2037 /*
2038 * If this is an SD card and we're writing, we can first
2039 * mark the known good sectors as ok.
2040 *
2041 * If the card is not SD, we can still mark as ok the written
2042 * sectors reported by the controller (which might be less than
2043 * the real number of written sectors, but never more).
2044 */
2045 if (mmc_card_sd(card)) {
2046 u32 blocks;
2047
2048 blocks = mmc_sd_num_wr_blocks(card);
2049 if (blocks != (u32)-1) {
2050 ret = blk_end_request(req, 0, blocks << 9);
2051 }
2052 } else {
2053 if (!mmc_packed_cmd(mq_rq->cmd_type))
2054 ret = blk_end_request(req, 0, brq->data.bytes_xfered);
2055 }
2056 return ret;
2057 }
2058
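/*
 * Complete the entries of a packed request that transferred successfully.
 * Requests before the failed index are ended with success; if an entry
 * failed, it is left as mq_rq->req and 1 is returned so it can be retried.
 */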
2059 static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
2060 {
2061 struct request *prq;
2062 struct mmc_packed *packed = mq_rq->packed;
2063 int idx = packed->idx_failure, i = 0;
2064 int ret = 0;
2065
2066 BUG_ON(!packed);
2067
2068 while (!list_empty(&packed->list)) {
2069 prq = list_entry_rq(packed->list.next);
2070 if (idx == i) {
2071 /* retry from error index */
2072 packed->nr_entries -= idx;
2073 mq_rq->req = prq;
2074 ret = 1;
2075
2076 if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
2077 list_del_init(&prq->queuelist);
2078 mmc_blk_clear_packed(mq_rq);
2079 }
2080 return ret;
2081 }
2082 list_del_init(&prq->queuelist);
2083 blk_end_request(prq, 0, blk_rq_bytes(prq));
2084 i++;
2085 }
2086
2087 mmc_blk_clear_packed(mq_rq);
2088 return ret;
2089 }
2090
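/* Fail every remaining entry of a packed request with -EIO and clear the packed state. */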
2091 static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
2092 {
2093 struct request *prq;
2094 struct mmc_packed *packed = mq_rq->packed;
2095
2096 BUG_ON(!packed);
2097
2098 while (!list_empty(&packed->list)) {
2099 prq = list_entry_rq(packed->list.next);
2100 list_del_init(&prq->queuelist);
2101 blk_end_request(prq, -EIO, blk_rq_bytes(prq));
2102 }
2103
2104 mmc_blk_clear_packed(mq_rq);
2105 }
2106
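/*
 * Put a prepared packed request back: requeue every entry except the first
 * one onto the block layer queue; the first entry stays as the current
 * request and is reissued by the caller.
 */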
2107 static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
2108 struct mmc_queue_req *mq_rq)
2109 {
2110 struct request *prq;
2111 struct request_queue *q = mq->queue;
2112 struct mmc_packed *packed = mq_rq->packed;
2113
2114 BUG_ON(!packed);
2115
2116 while (!list_empty(&packed->list)) {
2117 prq = list_entry_rq(packed->list.prev);
2118 if (prq->queuelist.prev != &packed->list) {
2119 list_del_init(&prq->queuelist);
2120 spin_lock_irq(q->queue_lock);
2121 blk_requeue_request(mq->queue, prq);
2122 spin_unlock_irq(q->queue_lock);
2123 } else {
2124 list_del_init(&prq->queuelist);
2125 }
2126 }
2127
2128 mmc_blk_clear_packed(mq_rq);
2129 }
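/*
 * MediaTek block-tag instrumentation: per-mmcqd-thread counters used to
 * periodically log workload, throughput and request-pattern statistics.
 */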
2130 #if defined(FEATURE_STORAGE_PERF_INDEX)
2131 #define PRT_TIME_PERIOD 500000000
2132 #define UP_LIMITS_4BYTE 4294967295UL //((4*1024*1024*1024)-1)
2133 #define ID_CNT 10
2134 pid_t mmcqd[ID_CNT]={0};
2135 bool start_async_req[ID_CNT] = {0};
2136 unsigned long long start_async_req_time[ID_CNT] = {0};
2137 static unsigned long long mmcqd_tag_t1[ID_CNT]={0}, mmccid_tag_t1=0;
2138 unsigned long long mmcqd_t_usage_wr[ID_CNT]={0}, mmcqd_t_usage_rd[ID_CNT]={0};
2139 unsigned int mmcqd_rq_size_wr[ID_CNT]={0}, mmcqd_rq_size_rd[ID_CNT]={0};
2140 static unsigned int mmcqd_wr_offset_tag[ID_CNT]={0}, mmcqd_rd_offset_tag[ID_CNT]={0}, mmcqd_wr_offset[ID_CNT]={0}, mmcqd_rd_offset[ID_CNT]={0};
2141 static unsigned int mmcqd_wr_bit[ID_CNT]={0},mmcqd_wr_tract[ID_CNT]={0};
2142 static unsigned int mmcqd_rd_bit[ID_CNT]={0},mmcqd_rd_tract[ID_CNT]={0};
2143 static unsigned int mmcqd_wr_break[ID_CNT]={0}, mmcqd_rd_break[ID_CNT]={0};
2144 unsigned int mmcqd_rq_count[ID_CNT]={0}, mmcqd_wr_rq_count[ID_CNT]={0}, mmcqd_rd_rq_count[ID_CNT]={0};
2145 extern u32 g_u32_cid[4];
2146 #ifdef FEATURE_STORAGE_META_LOG
2147 int check_perdev_minors = CONFIG_MMC_BLOCK_MINORS;
2148 struct metadata_rwlogger metadata_logger[10] = {{{0}}};
2149 #endif
2150
2151 unsigned int mmcqd_work_percent[ID_CNT]={0};
2152 unsigned int mmcqd_w_throughput[ID_CNT]={0};
2153 unsigned int mmcqd_r_throughput[ID_CNT]={0};
2154 unsigned int mmcqd_read_clear[ID_CNT]={0};
2155
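/* Reset the per-thread statistics counters for one mmcqd slot. */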
2156 static void g_var_clear(unsigned int idx)
2157 {
2158 mmcqd_t_usage_wr[idx]=0;
2159 mmcqd_t_usage_rd[idx]=0;
2160 mmcqd_rq_size_wr[idx]=0;
2161 mmcqd_rq_size_rd[idx]=0;
2162 mmcqd_rq_count[idx]=0;
2163 mmcqd_wr_offset[idx]=0;
2164 mmcqd_rd_offset[idx]=0;
2165 mmcqd_wr_break[idx]=0;
2166 mmcqd_rd_break[idx]=0;
2167 mmcqd_wr_tract[idx]=0;
2168 mmcqd_wr_bit[idx]=0;
2169 mmcqd_rd_tract[idx]=0;
2170 mmcqd_rd_bit[idx]=0;
2171 mmcqd_wr_rq_count[idx]=0;
2172 mmcqd_rd_rq_count[idx]=0;
2173 }
2174
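/*
 * Map the current task's PID to a slot in the mmcqd[] table, claiming a free
 * slot (or reusing the last one) if this mmcqd thread has not been seen yet.
 */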
2175 unsigned int find_mmcqd_index(void)
2176 {
2177 pid_t mmcqd_pid=0;
2178 unsigned int idx=0;
2179 unsigned char i=0;
2180
2181 mmcqd_pid = task_pid_nr(current);
2182
2183 if(mmcqd[0] ==0) {
2184 mmcqd[0] = mmcqd_pid;
2185 start_async_req[0]=0;
2186 }
2187
2188 for(i=0;i<ID_CNT;i++)
2189 {
2190 if(mmcqd_pid == mmcqd[i])
2191 {
2192 idx=i;
2193 break;
2194 }
2195 if ((mmcqd[i] == 0) ||( i==ID_CNT-1))
2196 {
2197 mmcqd[i]=mmcqd_pid;
2198 start_async_req[i]=0;
2199 idx=i;
2200 break;
2201 }
2202 }
2203 return idx;
2204 }
2205
2206 #endif
2207 //#undef FEATURE_STORAGE_PID_LOGGER
2208 #if defined(FEATURE_STORAGE_PID_LOGGER)
2209
2210 struct struct_pid_logger g_pid_logger[PID_ID_CNT]={{0,0,{0},{0},{0},{0}}};
2211
2212
2213
2214 unsigned char *page_logger = NULL;
2215 spinlock_t g_locker;
2216
2217 #endif
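/*
 * Issue read/write requests through the asynchronous request machinery:
 * prepare the new request (packed or single), hand it to mmc_start_req(),
 * and complete or retry the previously started request according to its
 * returned status.
 */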
2218 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
2219 {
2220 struct mmc_blk_data *md = mq->data;
2221 struct mmc_card *card = md->queue.card;
2222 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
2223 int ret = 1, disable_multi = 0, retry = 0, type;
2224 enum mmc_blk_status status;
2225 struct mmc_queue_req *mq_rq;
2226 struct request *req = rqc;
2227 struct mmc_async_req *areq;
2228 const u8 packed_nr = 2;
2229 u8 reqs = 0;
2230 unsigned long long time1 = 0;
2231 #if defined(FEATURE_STORAGE_PERF_INDEX)
2232 pid_t mmcqd_pid=0;
2233 unsigned long long t_period=0, t_usage=0;
2234 unsigned int t_percent=0;
2235 unsigned int perf_meter=0;
2236 unsigned int rq_byte=0,rq_sector=0,sect_offset=0;
2237 unsigned int diversity=0;
2238 unsigned int idx=0;
2239 #ifdef FEATURE_STORAGE_META_LOG
2240 unsigned int mmcmetaindex=0;
2241 #endif
2242 #endif
2243 #if defined(FEATURE_STORAGE_PID_LOGGER)
2244 unsigned int index=0;
2245 #endif
2246
2247 if (!rqc && !mq->mqrq_prev->req)
2248 return 0;
2249 time1 = sched_clock();
2250
2251 if (rqc)
2252 reqs = mmc_blk_prep_packed_list(mq, rqc);
2253 #if defined(FEATURE_STORAGE_PERF_INDEX)
2254 mmcqd_pid = task_pid_nr(current);
2255
2256 idx = find_mmcqd_index();
2257
2258 mmcqd_read_clear[idx] = 1;
2259 if(mmccid_tag_t1==0)
2260 mmccid_tag_t1 = time1;
2261 t_period = time1 - mmccid_tag_t1;
2262 if(t_period >= (unsigned long long )((PRT_TIME_PERIOD)*(unsigned long long )10))
2263 {
2264 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "MMC Queue Thread:%d, %d, %d, %d, %d \n", mmcqd[0], mmcqd[1], mmcqd[2], mmcqd[3], mmcqd[4]);
2265 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "MMC CID: %x %x %x %x \n", g_u32_cid[0], g_u32_cid[1], g_u32_cid[2], g_u32_cid[3]);
2266 mmccid_tag_t1 = time1;
2267 }
2268 if(mmcqd_tag_t1[idx]==0)
2269 mmcqd_tag_t1[idx] = time1;
2270 t_period = time1 - mmcqd_tag_t1[idx];
2271
2272 if(t_period >= (unsigned long long )PRT_TIME_PERIOD)
2273 {
2274 mmcqd_read_clear[idx] = 2;
2275 mmcqd_work_percent[idx] = 1;
2276 mmcqd_r_throughput[idx] = 0;
2277 mmcqd_w_throughput[idx] = 0;
2278 t_usage = mmcqd_t_usage_wr [idx] + mmcqd_t_usage_rd[idx];
2279 if(t_period > t_usage*100)
2280 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Workload < 1%%, duty %lld, period %lld, req_cnt=%d \n", mmcqd[idx], t_usage, t_period, mmcqd_rq_count[idx]);
2281 else
2282 {
2283 do_div(t_period, 100); //boundary issue
2284 t_percent =((unsigned int)t_usage)/((unsigned int)t_period);
2285 mmcqd_work_percent[idx] = t_percent;
2286 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Workload=%d%%, duty %lld, period %lld00, req_cnt=%d \n", mmcqd[idx], t_percent, t_usage, t_period, mmcqd_rq_count[idx]); //period %lld00 == period %lld x100
2287 }
2288 if(mmcqd_wr_rq_count[idx] >= 2)
2289 {
2290 diversity = mmcqd_wr_offset[idx]/(mmcqd_wr_rq_count[idx]-1);
2291 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Write Diversity=%d sectors offset, req_cnt=%d, break_cnt=%d, tract_cnt=%d, bit_cnt=%d\n", mmcqd[idx], diversity, mmcqd_wr_rq_count[idx], mmcqd_wr_break[idx], mmcqd_wr_tract[idx], mmcqd_wr_bit[idx]);
2292 }
2293 if(mmcqd_rd_rq_count[idx] >= 2)
2294 {
2295 diversity = mmcqd_rd_offset[idx]/(mmcqd_rd_rq_count[idx]-1);
2296 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Read Diversity=%d sectors offset, req_cnt=%d, break_cnt=%d, tract_cnt=%d, bit_cnt=%d\n", mmcqd[idx], diversity, mmcqd_rd_rq_count[idx], mmcqd_rd_break[idx], mmcqd_rd_tract[idx], mmcqd_rd_bit[idx]);
2297 }
2298 if(mmcqd_t_usage_wr[idx])
2299 {
2300 do_div(mmcqd_t_usage_wr[idx], 1000000); //boundary issue
2301 if(mmcqd_t_usage_wr[idx]) // skip the print if the duration is < 1 ms
2302 {
2303 perf_meter = (mmcqd_rq_size_wr[idx])/((unsigned int)mmcqd_t_usage_wr[idx]); //kb/s
2304 mmcqd_w_throughput[idx] = perf_meter;
2305 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Write Throughput=%d kB/s, size: %d bytes, time:%lld ms\n", mmcqd[idx], perf_meter, mmcqd_rq_size_wr[idx], mmcqd_t_usage_wr[idx]);
2306 }
2307 }
2308 if(mmcqd_t_usage_rd[idx])
2309 {
2310 do_div(mmcqd_t_usage_rd[idx], 1000000); //boundary issue
2311 if(mmcqd_t_usage_rd[idx]) // skip the print if the duration is < 1 ms
2312 {
2313 perf_meter = (mmcqd_rq_size_rd[idx])/((unsigned int)mmcqd_t_usage_rd[idx]); //kb/s
2314 mmcqd_r_throughput[idx] = perf_meter;
2315 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Read Throughput=%d kB/s, size: %d bytes, time:%lld ms\n", mmcqd[idx], perf_meter, mmcqd_rq_size_rd[idx], mmcqd_t_usage_rd[idx]);
2316 }
2317 }
2318 mmcqd_tag_t1[idx]=time1;
2319 g_var_clear(idx);
2320 #ifdef FEATURE_STORAGE_META_LOG
2321 mmcmetaindex = mmc_get_devidx(md->disk);
2322 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd metarw WR:%d NWR:%d HR:%d WDR:%d HDR:%d WW:%d NWW:%d HW:%d\n",
2323 metadata_logger[mmcmetaindex].metadata_rw_logger[0], metadata_logger[mmcmetaindex].metadata_rw_logger[1],
2324 metadata_logger[mmcmetaindex].metadata_rw_logger[2], metadata_logger[mmcmetaindex].metadata_rw_logger[3],
2325 metadata_logger[mmcmetaindex].metadata_rw_logger[4], metadata_logger[mmcmetaindex].metadata_rw_logger[5],
2326 metadata_logger[mmcmetaindex].metadata_rw_logger[6], metadata_logger[mmcmetaindex].metadata_rw_logger[7]);
2327 clear_metadata_rw_status(md->disk->first_minor);
2328 #endif
2329 #if defined(FEATURE_STORAGE_PID_LOGGER)
2330 do {
2331 int i;
2332 for(index=0; index<PID_ID_CNT; index++) {
2333
2334 if( g_pid_logger[index].current_pid!=0 && g_pid_logger[index].current_pid == mmcqd_pid)
2335 break;
2336 }
2337 if( index == PID_ID_CNT )
2338 break;
2339 for( i=0; i<PID_LOGGER_COUNT; i++) {
2340 //printk(KERN_INFO"hank mmcqd %d %d", g_pid_logger[index].pid_logger[i], mmcqd_pid);
2341 if( g_pid_logger[index].pid_logger[i] == 0)
2342 break;
2343 sprintf (g_pid_logger[index].pid_buffer+i*37, "{%05d:%05d:%08d:%05d:%08d}", g_pid_logger[index].pid_logger[i], g_pid_logger[index].pid_logger_counter[i], g_pid_logger[index].pid_logger_length[i], g_pid_logger[index].pid_logger_r_counter[i], g_pid_logger[index].pid_logger_r_length[i]);
2344
2345 }
2346 if( i != 0) {
2347 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd pid:%d %s\n", g_pid_logger[index].current_pid, g_pid_logger[index].pid_buffer);
2348 //xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "sizeof(&(g_pid_logger[index].pid_logger)):%d\n", sizeof(unsigned short)*PID_LOGGER_COUNT);
2349 //memset( &(g_pid_logger[index].pid_logger), 0, sizeof(struct struct_pid_logger)-(unsigned long)&(((struct struct_pid_logger *)0)->pid_logger));
2350 memset( &(g_pid_logger[index].pid_logger), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
2351 memset( &(g_pid_logger[index].pid_logger_counter), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
2352 memset( &(g_pid_logger[index].pid_logger_length), 0, sizeof(unsigned int)*PID_LOGGER_COUNT);
2353 memset( &(g_pid_logger[index].pid_logger_r_counter), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
2354 memset( &(g_pid_logger[index].pid_logger_r_length), 0, sizeof(unsigned int)*PID_LOGGER_COUNT);
2355 memset( &(g_pid_logger[index].pid_buffer), 0, sizeof(char)*1024);
2356
2357
2358 }
2359 g_pid_logger[index].pid_buffer[0] = '\0';
2360
2361 } while(0);
2362 #endif
2363
2364 #if defined(FEATURE_STORAGE_VMSTAT_LOGGER)
2365 xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "vmstat (FP:%ld)(FD:%ld)(ND:%ld)(WB:%ld)(NW:%ld)\n",
2366 ((global_page_state(NR_FILE_PAGES)) << (PAGE_SHIFT - 10)),
2367 ((global_page_state(NR_FILE_DIRTY)) << (PAGE_SHIFT - 10)),
2368 ((global_page_state(NR_DIRTIED)) << (PAGE_SHIFT - 10)),
2369 ((global_page_state(NR_WRITEBACK)) << (PAGE_SHIFT - 10)),
2370 ((global_page_state(NR_WRITTEN)) << (PAGE_SHIFT - 10)));
2371 #endif
2372
2373 }
2374 if( rqc )
2375 {
2376 rq_byte = blk_rq_bytes(rqc);
2377 rq_sector = blk_rq_sectors(rqc);
2378 if(rq_data_dir(rqc) == WRITE)
2379 {
2380 if(mmcqd_wr_offset_tag[idx]>0)
2381 {
2382 sect_offset = abs(blk_rq_pos(rqc) - mmcqd_wr_offset_tag[idx]);
2383 mmcqd_wr_offset[idx] += sect_offset;
2384 if(sect_offset == 1)
2385 mmcqd_wr_break[idx]++;
2386 }
2387 mmcqd_wr_offset_tag[idx] = blk_rq_pos(rqc) + rq_sector;
2388 if(rq_sector <= 1) //512 bytes
2389 mmcqd_wr_bit[idx] ++;
2390 else if(rq_sector >= 1016) //508kB
2391 mmcqd_wr_tract[idx] ++;
2392 }
2393 else //read
2394 {
2395 if(mmcqd_rd_offset_tag[idx]>0)
2396 {
2397 sect_offset = abs(blk_rq_pos(rqc) - mmcqd_rd_offset_tag[idx]);
2398 mmcqd_rd_offset[idx] += sect_offset;
2399 if(sect_offset == 1)
2400 mmcqd_rd_break[idx]++;
2401 }
2402 mmcqd_rd_offset_tag[idx] = blk_rq_pos(rqc) + rq_sector;
2403 if(rq_sector <= 1) //512 bytes
2404 mmcqd_rd_bit[idx] ++;
2405 else if(rq_sector >= 1016) //508kB
2406 mmcqd_rd_tract[idx] ++;
2407 }
2408 }
2409 #endif
2410 do {
2411 if (rqc) {
2412 /*
2413 * When a 4KB native sector size is enabled, only reads and
2414 * writes whose block count is a multiple of 8 are allowed.
2415 */
2416 if ((brq->data.blocks & 0x07) &&
2417 (card->ext_csd.data_sector_size == 4096)) {
2418 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
2419 req->rq_disk->disk_name);
2420 mq_rq = mq->mqrq_cur;
2421 goto cmd_abort;
2422 }
2423
2424 if (reqs >= packed_nr)
2425 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
2426 card, mq);
2427 else
2428 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2429 areq = &mq->mqrq_cur->mmc_active;
2430 } else
2431 areq = NULL;
2432 areq = mmc_start_req(card->host, areq, (int *) &status);
2433 if (!areq) {
2434 if (status == MMC_BLK_NEW_REQUEST)
2435 mq->flags |= MMC_QUEUE_NEW_REQUEST;
2436 return 0;
2437 }
2438
2439 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
2440 brq = &mq_rq->brq;
2441 req = mq_rq->req;
2442 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
2443 mmc_queue_bounce_post(mq_rq);
2444
2445 switch (status) {
2446 case MMC_BLK_SUCCESS:
2447 case MMC_BLK_PARTIAL:
2448 /*
2449 * A block was successfully transferred.
2450 */
2451 mmc_blk_reset_success(md, type);
2452
2453 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2454 ret = mmc_blk_end_packed_req(mq_rq);
2455 break;
2456 } else {
2457 ret = blk_end_request(req, 0,
2458 brq->data.bytes_xfered);
2459 }
2460
2461 // if (card && card->host && card->host->areq)
2462 // met_mmc_end(card->host, card->host->areq);
2463
2464 /*
2465 * If the blk_end_request function returns non-zero even
2466 * though all data has been transferred and no errors
2467 * were returned by the host controller, it's a bug.
2468 */
2469 if (status == MMC_BLK_SUCCESS && ret) {
2470 pr_err("%s BUG rq_tot %d d_xfer %d\n",
2471 __func__, blk_rq_bytes(req),
2472 brq->data.bytes_xfered);
2473 rqc = NULL;
2474 goto cmd_abort;
2475 }
2476 break;
2477 case MMC_BLK_CMD_ERR:
2478 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
2479 if (!mmc_blk_reset(md, card->host, type))
2480 break;
2481 goto cmd_abort;
2482 case MMC_BLK_RETRY:
2483 if (retry++ < 5)
2484 break;
2485 /* Fall through */
2486 case MMC_BLK_ABORT:
2487 if (!mmc_blk_reset(md, card->host, type))
2488 break;
2489 goto cmd_abort;
2490 case MMC_BLK_DATA_ERR: {
2491 int err;
2492
2493 err = mmc_blk_reset(md, card->host, type);
2494 if (!err)
2495 break;
2496 if (err == -ENODEV ||
2497 mmc_packed_cmd(mq_rq->cmd_type))
2498 goto cmd_abort;
2499 /* Fall through */
2500 }
2501 case MMC_BLK_ECC_ERR:
2502 if (brq->data.blocks > 1) {
2503 /* Redo read one sector at a time */
2504 pr_warning("%s: retrying using single block read\n",
2505 req->rq_disk->disk_name);
2506 disable_multi = 1;
2507 break;
2508 }
2509 /*
2510 * After an error, we redo I/O one sector at a
2511 * time, so we only reach here after trying to
2512 * read a single sector.
2513 */
2514 ret = blk_end_request(req, -EIO,
2515 brq->data.blksz);
2516 if (!ret)
2517 goto start_new_req;
2518 break;
2519 case MMC_BLK_NOMEDIUM:
2520 goto cmd_abort;
2521 default:
2522 pr_err("%s: Unhandled return value (%d)",
2523 req->rq_disk->disk_name, status);
2524 goto cmd_abort;
2525 }
2526
2527 if (ret) {
2528 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2529 if (!mq_rq->packed->retries)
2530 goto cmd_abort;
2531 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
2532 mmc_start_req(card->host,
2533 &mq_rq->mmc_active, NULL);
2534 } else {
2535
2536 /*
2537 * In case of an incomplete request,
2538 * prepare it again and resend it.
2539 */
2540 mmc_blk_rw_rq_prep(mq_rq, card,
2541 disable_multi, mq);
2542 mmc_start_req(card->host,
2543 &mq_rq->mmc_active, NULL);
2544 }
2545 }
2546 } while (ret);
2547
2548 return 1;
2549
2550 cmd_abort:
2551 if (mmc_packed_cmd(mq_rq->cmd_type)) {
2552 mmc_blk_abort_packed_req(mq_rq);
2553 } else {
2554 if (mmc_card_removed(card))
2555 req->cmd_flags |= REQ_QUIET;
2556 while (ret)
2557 ret = blk_end_request(req, -EIO,
2558 blk_rq_cur_bytes(req));
2559 }
2560
2561 start_new_req:
2562 if (rqc) {
2563 if (mmc_card_removed(card)) {
2564 rqc->cmd_flags |= REQ_QUIET;
2565 blk_end_request_all(rqc, -EIO);
2566 } else {
2567 /*
2568 * If the current request is packed, it needs to be put back.
2569 */
2570 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
2571 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
2572
2573 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
2574 mmc_start_req(card->host,
2575 &mq->mqrq_cur->mmc_active, NULL);
2576 }
2577 }
2578
2579 return 0;
2580 }
2581
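/*
 * Top-level issue function: claims the host for the first request of a burst,
 * switches to the right partition, dispatches discard/secure-discard/flush
 * requests synchronously, routes everything else to mmc_blk_issue_rw_rq(),
 * and releases the host once the queue goes idle or a special request is done.
 */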
2582 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2583 {
2584 int ret;
2585 struct mmc_blk_data *md = mq->data;
2586 struct mmc_card *card = md->queue.card;
2587 struct mmc_host *host = card->host;
2588 unsigned long flags;
2589 unsigned int cmd_flags = req ? req->cmd_flags : 0;
2590
2591 #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
2592 if (mmc_bus_needs_resume(card->host))
2593 mmc_resume_bus(card->host);
2594 #endif
2595
2596 if (req && !mq->mqrq_prev->req)
2597 /* claim host only for the first request */
2598 mmc_claim_host(card->host);
2599
2600 ret = mmc_blk_part_switch(card, md);
2601 if (ret) {
2602 if (req) {
2603 blk_end_request_all(req, -EIO);
2604 }
2605 ret = 0;
2606 goto out;
2607 }
2608
2609 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
2610 if (cmd_flags & REQ_DISCARD) {
2611 /* complete ongoing async transfer before issuing discard */
2612 if (card->host->areq)
2613 mmc_blk_issue_rw_rq(mq, NULL);
2614 if (req->cmd_flags & REQ_SECURE &&
2615 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2616 ret = mmc_blk_issue_secdiscard_rq(mq, req);
2617 else
2618 ret = mmc_blk_issue_discard_rq(mq, req);
2619 } else if (cmd_flags & REQ_FLUSH) {
2620 /* complete ongoing async transfer before issuing flush */
2621 if (card->host->areq)
2622 mmc_blk_issue_rw_rq(mq, NULL);
2623 ret = mmc_blk_issue_flush(mq, req);
2624 } else {
2625 if (!req && host->areq) {
2626 spin_lock_irqsave(&host->context_info.lock, flags);
2627 host->context_info.is_waiting_last_req = true;
2628 spin_unlock_irqrestore(&host->context_info.lock, flags);
2629 }
2630 ret = mmc_blk_issue_rw_rq(mq, req);
2631 }
2632
2633 out:
2634 if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
2635 (cmd_flags & MMC_REQ_SPECIAL_MASK))
2636 /*
2637 * Release the host when there are no more requests
2638 * and after a special request (discard, flush) is done.
2639 * In case of a special request, there is no reentry into
2640 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
2641 */
2642 mmc_release_host(card->host);
2643 return ret;
2644 }
2645
2646 static inline int mmc_blk_readonly(struct mmc_card *card)
2647 {
2648 return mmc_card_readonly(card) ||
2649 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2650 }
2651
2652 //#if defined(FEATURE_STORAGE_PID_LOGGER)
2653 //extern unsigned long get_memory_size(void);
2654 //#endif
2655 #ifdef CONFIG_MTK_EXTMEM
2656 extern void* extmem_malloc_page_align(size_t bytes);
2657 #endif
2658 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2659 struct device *parent,
2660 sector_t size,
2661 bool default_ro,
2662 const char *subname,
2663 int area_type)
2664 {
2665 struct mmc_blk_data *md;
2666 int devidx, ret;
2667
2668 devidx = find_first_zero_bit(dev_use, max_devices);
2669 if (devidx >= max_devices)
2670 return ERR_PTR(-ENOSPC);
2671 __set_bit(devidx, dev_use);
2672
2673 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
2674 if (!md) {
2675 ret = -ENOMEM;
2676 goto out;
2677 }
2678
2679 /*
2680 * !subname implies we are creating main mmc_blk_data that will be
2681 * associated with mmc_card with mmc_set_drvdata. Due to device
2682 * partitions, devidx will not coincide with a per-physical card
2683 * index anymore so we keep track of a name index.
2684 */
2685 if (!subname) {
2686 md->name_idx = find_first_zero_bit(name_use, max_devices);
2687 __set_bit(md->name_idx, name_use);
2688 } else
2689 md->name_idx = ((struct mmc_blk_data *)
2690 dev_to_disk(parent)->private_data)->name_idx;
2691
2692 md->area_type = area_type;
2693
2694 /*
2695 * Set the read-only status based on the supported commands
2696 * and the write protect switch.
2697 */
2698 md->read_only = mmc_blk_readonly(card);
2699
2700 md->disk = alloc_disk(perdev_minors);
2701 if (md->disk == NULL) {
2702 ret = -ENOMEM;
2703 goto err_kfree;
2704 }
2705
2706 spin_lock_init(&md->lock);
2707 INIT_LIST_HEAD(&md->part);
2708 md->usage = 1;
2709
2710 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
2711 if (ret)
2712 goto err_putdisk;
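/*
 * Lazily allocate the page_logger table (one struct page_pid_logger per
 * physical DRAM page) used by the FEATURE_STORAGE_PID_LOGGER instrumentation.
 */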
2713 #if defined(FEATURE_STORAGE_PID_LOGGER)
2714 if( !page_logger){
2715 //num_page_logger = sizeof(struct page_pid_logger);
2716 //page_logger = vmalloc(num_physpages*sizeof(struct page_pid_logger));
2717 // solution: use get_memory_size to obtain the size from start pfn to max pfn
2718
2719 //unsigned long count = get_memory_size() >> PAGE_SHIFT;
2720 unsigned long count = get_max_DRAM_size() >> PAGE_SHIFT;
2721 #ifdef CONFIG_MTK_EXTMEM
2722 page_logger = extmem_malloc_page_align(count * sizeof(struct page_pid_logger));
2723 #else
2724 page_logger = vmalloc(count * sizeof(struct page_pid_logger));
2725 #endif
2726 if( page_logger) {
2727 memset( page_logger, -1, count*sizeof( struct page_pid_logger));
2728 }
2729 spin_lock_init(&g_locker);
2730 }
2731 #endif
2732 #if defined(FEATURE_STORAGE_META_LOG)
2733 check_perdev_minors = perdev_minors;
2734 #endif
2735
2736 md->queue.issue_fn = mmc_blk_issue_rq;
2737 md->queue.data = md;
2738
2739 md->disk->major = MMC_BLOCK_MAJOR;
2740 md->disk->first_minor = devidx * perdev_minors;
2741 md->disk->fops = &mmc_bdops;
2742 md->disk->private_data = md;
2743 md->disk->queue = md->queue.queue;
2744 md->disk->driverfs_dev = parent;
2745 set_disk_ro(md->disk, md->read_only || default_ro);
2746 md->disk->flags = GENHD_FL_EXT_DEVT;
2747 if (area_type & MMC_BLK_DATA_AREA_RPMB)
2748 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
2749
2750 /*
2751 * As discussed on lkml, GENHD_FL_REMOVABLE should:
2752 *
2753 * - be set for removable media with permanent block devices
2754 * - be unset for removable block devices with permanent media
2755 *
2756 * Since MMC block devices clearly fall under the second
2757 * case, we do not set GENHD_FL_REMOVABLE. Userspace
2758 * should use the block device creation/destruction hotplug
2759 * messages to tell when the card is present.
2760 */
2761
2762 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2763 "mmcblk%d%s", md->name_idx, subname ? subname : "");
2764
2765 if (mmc_card_mmc(card))
2766 blk_queue_logical_block_size(md->queue.queue,
2767 card->ext_csd.data_sector_size);
2768 else
2769 blk_queue_logical_block_size(md->queue.queue, 512);
2770
2771 set_capacity(md->disk, size);
2772
2773 if (mmc_host_cmd23(card->host)) {
2774 if (mmc_card_mmc(card) ||
2775 (mmc_card_sd(card) &&
2776 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2777 md->flags |= MMC_BLK_CMD23;
2778 }
2779
2780 if (mmc_card_mmc(card) &&
2781 md->flags & MMC_BLK_CMD23 &&
2782 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2783 card->ext_csd.rel_sectors)) {
2784 md->flags |= MMC_BLK_REL_WR;
2785 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
2786 }
2787
2788 if (mmc_card_mmc(card) &&
2789 (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2790 (md->flags & MMC_BLK_CMD23) &&
2791 card->ext_csd.packed_event_en) {
2792 if (!mmc_packed_init(&md->queue, card))
2793 md->flags |= MMC_BLK_PACKED_CMD;
2794 }
2795
2796 return md;
2797
2798 err_putdisk:
2799 put_disk(md->disk);
2800 err_kfree:
2801 kfree(md);
2802 out:
2803 return ERR_PTR(ret);
2804 }
2805
2806 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2807 {
2808 sector_t size;
2809 #ifdef CONFIG_MTK_EMMC_SUPPORT
2810 unsigned int l_reserve;
2811 struct storage_info s_info = {0};
2812 #endif
2813 struct mmc_blk_data *md;
2814
2815 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2816 /*
2817 * The EXT_CSD sector count is in number of 512-byte
2818 * sectors.
2819 */
2820 size = card->ext_csd.sectors;
2821 } else {
2822 /*
2823 * The CSD capacity field is in units of read_blkbits.
2824 * set_capacity takes units of 512 bytes.
2825 */
2826 size = card->csd.capacity << (card->csd.read_blkbits - 9);
2827 }
2828
2829 if(!mmc_card_sd(card)){
2830 #ifdef CONFIG_MTK_EMMC_SUPPORT
2831 msdc_get_info(EMMC_CARD_BOOT, EMMC_RESERVE, &s_info);
2832 l_reserve = s_info.emmc_reserve;
2833 printk("l_reserve = 0x%x\n", l_reserve);
2834 size -= l_reserve; /*reserved for 64MB (emmc otp + emmc combo offset + reserved)*/
2835 #endif
2836 }
2837 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2838 MMC_BLK_DATA_AREA_MAIN);
2839 return md;
2840 }
2841
2842 static int mmc_blk_alloc_part(struct mmc_card *card,
2843 struct mmc_blk_data *md,
2844 unsigned int part_type,
2845 sector_t size,
2846 bool default_ro,
2847 const char *subname,
2848 int area_type)
2849 {
2850 char cap_str[10];
2851 struct mmc_blk_data *part_md;
2852
2853 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
2854 subname, area_type);
2855 if (IS_ERR(part_md))
2856 return PTR_ERR(part_md);
2857 part_md->part_type = part_type;
2858 list_add(&part_md->part, &md->part);
2859
2860 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
2861 cap_str, sizeof(cap_str));
2862 pr_info("%s: %s %s partition %u %s\n",
2863 part_md->disk->disk_name, mmc_card_id(card),
2864 mmc_card_name(card), part_md->part_type, cap_str);
2865 return 0;
2866 }
2867
2868 /* MMC Physical partitions consist of two boot partitions and
2869 * up to four general purpose partitions.
2870 * For each partition enabled in EXT_CSD a block device will be allocated
2871 * to provide access to the partition.
2872 */
2873
2874 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2875 {
2876 int idx, ret = 0;
2877
2878 if (!mmc_card_mmc(card))
2879 return 0;
2880
2881 for (idx = 0; idx < card->nr_parts; idx++) {
2882 if (card->part[idx].size) {
2883 ret = mmc_blk_alloc_part(card, md,
2884 card->part[idx].part_cfg,
2885 card->part[idx].size >> 9,
2886 card->part[idx].force_ro,
2887 card->part[idx].name,
2888 card->part[idx].area_type);
2889 if (ret)
2890 return ret;
2891 }
2892 }
2893
2894 return ret;
2895 }
2896
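/*
 * Tear down one block device: remove its sysfs attributes, delete the gendisk
 * so no new requests arrive, flush and clean up the queue, then drop the
 * reference taken at allocation time.
 */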
2897 static void mmc_blk_remove_req(struct mmc_blk_data *md)
2898 {
2899 struct mmc_card *card;
2900
2901 if (md) {
2902 card = md->queue.card;
2903 if (md->disk->flags & GENHD_FL_UP) {
2904 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2905 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2906 card->ext_csd.boot_ro_lockable)
2907 device_remove_file(disk_to_dev(md->disk),
2908 &md->power_ro_lock);
2909
2910 /* Stop new requests from getting into the queue */
2911 del_gendisk(md->disk);
2912 }
2913
2914 /* Then flush out any already in there */
2915 mmc_cleanup_queue(&md->queue);
2916 if (md->flags & MMC_BLK_PACKED_CMD)
2917 mmc_packed_clean(&md->queue);
2918 mmc_blk_put(md);
2919 }
2920 }
2921
2922 static void mmc_blk_remove_parts(struct mmc_card *card,
2923 struct mmc_blk_data *md)
2924 {
2925 struct list_head *pos, *q;
2926 struct mmc_blk_data *part_md;
2927
2928 __clear_bit(md->name_idx, name_use);
2929 list_for_each_safe(pos, q, &md->part) {
2930 part_md = list_entry(pos, struct mmc_blk_data, part);
2931 list_del(pos);
2932 mmc_blk_remove_req(part_md);
2933 }
2934 }
2935
2936 static int mmc_add_disk(struct mmc_blk_data *md)
2937 {
2938 int ret;
2939 struct mmc_card *card = md->queue.card;
2940
2941 add_disk(md->disk);
2942 md->force_ro.show = force_ro_show;
2943 md->force_ro.store = force_ro_store;
2944 sysfs_attr_init(&md->force_ro.attr);
2945 md->force_ro.attr.name = "force_ro";
2946 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2947 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2948 if (ret)
2949 goto force_ro_fail;
2950
2951 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2952 card->ext_csd.boot_ro_lockable) {
2953 umode_t mode;
2954
2955 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2956 mode = S_IRUGO;
2957 else
2958 mode = S_IRUGO | S_IWUSR;
2959
2960 md->power_ro_lock.show = power_ro_lock_show;
2961 md->power_ro_lock.store = power_ro_lock_store;
2962 sysfs_attr_init(&md->power_ro_lock.attr);
2963 md->power_ro_lock.attr.mode = mode;
2964 md->power_ro_lock.attr.name =
2965 "ro_lock_until_next_power_on";
2966 ret = device_create_file(disk_to_dev(md->disk),
2967 &md->power_ro_lock);
2968 if (ret)
2969 goto power_ro_lock_fail;
2970 }
2971 return ret;
2972
2973 power_ro_lock_fail:
2974 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2975 force_ro_fail:
2976 del_gendisk(md->disk);
2977
2978 return ret;
2979 }
2980
2981 #define CID_MANFID_SANDISK 0x2
2982 #define CID_MANFID_TOSHIBA 0x11
2983 #define CID_MANFID_MICRON 0x13
2984 #define CID_MANFID_SAMSUNG 0x15
2985 #define CID_MANFID_SANDISK_NEW 0x45
2986 #define CID_MANFID_HYNIX 0x90
2987 #define CID_MANFID_KSI 0x70
2988
2989 static const struct mmc_fixup blk_fixups[] =
2990 {
2991 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
2992 MMC_QUIRK_INAND_CMD38),
2993 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
2994 MMC_QUIRK_INAND_CMD38),
2995 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
2996 MMC_QUIRK_INAND_CMD38),
2997 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
2998 MMC_QUIRK_INAND_CMD38),
2999 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
3000 MMC_QUIRK_INAND_CMD38),
3001 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_NEW, CID_OEMID_ANY, add_quirk,
3002 MMC_QUIRK_PON),
3003 /*
3004 * Some MMC cards experience performance degradation with CMD23
3005 * instead of CMD12-bounded multiblock transfers. For now we'll
3006 * black list what's bad...
3007 * - Certain Toshiba cards.
3008 *
3009 * N.B. This doesn't affect SD cards.
3010 */
3011 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3012 MMC_QUIRK_BLK_NO_CMD23),
3013 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3014 MMC_QUIRK_BLK_NO_CMD23),
3015 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3016 MMC_QUIRK_BLK_NO_CMD23),
3017
3018 /*
3019 * Some Micron MMC cards need a longer data read timeout than
3020 * indicated in the CSD.
3021 */
3022 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
3023 MMC_QUIRK_LONG_READ_TIME),
3024
3025 /*
3026 * On these Samsung MoviNAND parts, performing secure erase or
3027 * secure trim can result in unrecoverable corruption due to a
3028 * firmware bug.
3029 */
3030 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3031 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3032 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3033 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3034 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3035 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3036 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3037 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3038 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3039 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3040 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3041 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3042 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3043 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3044 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3045 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3046 #ifdef CONFIG_MTK_EMMC_CACHE
3047 /*
3048 * With the cache feature enabled, some MMC cards cannot flush previously cached data by forced programming or reliable write,
3049 * which means the strong ordering between metadata and file data cannot be guaranteed.
3050 */
3051
3052 /*
3053 * On Toshiba eMMC, write performance drops after the cache feature is enabled, because flush operations waste too much time.
3054 */
3055 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
3056 MMC_QUIRK_DISABLE_CACHE),
3057 #endif
3058
3059 /* Trim on Hynix eMMC 4.41 can lead to boot failures. */
3060 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY, add_quirk_mmc,
3061 MMC_QUIRK_TRIM_UNSTABLE),
3062
3063 /* Trim on KSI parts with PRV=0x3 leads to a write performance drop. */
3064 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_KSI, CID_OEMID_ANY, add_quirk_mmc_ksi_v03_skip_trim,
3065 MMC_QUIRK_KSI_V03_SKIP_TRIM),
3066
3067 END_FIXUP
3068 };
3069
3070 #if defined(CONFIG_MTK_EMMC_SUPPORT) && !defined(CONFIG_MTK_GPT_SCHEME_SUPPORT)
3071 extern void emmc_create_sys_symlink (struct mmc_card *card);
3072 #endif
3073 static int mmc_blk_probe(struct mmc_card *card)
3074 {
3075 struct mmc_blk_data *md, *part_md;
3076 char cap_str[10];
3077
3078 /*
3079 * Check that the card supports the command class(es) we need.
3080 */
3081 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
3082 return -ENODEV;
3083
3084 md = mmc_blk_alloc(card);
3085 if (IS_ERR(md))
3086 return PTR_ERR(md);
3087
3088 string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
3089 cap_str, sizeof(cap_str));
3090 pr_info("%s: %s %s %s %s\n",
3091 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
3092 cap_str, md->read_only ? "(ro)" : "");
3093
3094 if (mmc_blk_alloc_parts(card, md))
3095 goto out;
3096
3097 mmc_set_drvdata(card, md);
3098 mmc_fixup_device(card, blk_fixups);
3099
3100 printk("[%s]: %s: quirks applied from manufacturer settings, quirks=0x%x\n", __func__, md->disk->disk_name, card->quirks);
3101
3102 #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
3103 mmc_set_bus_resume_policy(card->host, 1);
3104 #endif
3105 if (mmc_add_disk(md))
3106 goto out;
3107
3108 list_for_each_entry(part_md, &md->part, part) {
3109 if (mmc_add_disk(part_md))
3110 goto out;
3111 }
3112 #if defined(CONFIG_MTK_EMMC_SUPPORT) && !defined(CONFIG_MTK_GPT_SCHEME_SUPPORT)
3113 emmc_create_sys_symlink(card);
3114 #endif
3115 return 0;
3116
3117 out:
3118 mmc_blk_remove_parts(card, md);
3119 mmc_blk_remove_req(md);
3120 return 0;
3121 }
3122
3123 static void mmc_blk_remove(struct mmc_card *card)
3124 {
3125 struct mmc_blk_data *md = mmc_get_drvdata(card);
3126
3127 mmc_blk_remove_parts(card, md);
3128 mmc_claim_host(card->host);
3129 mmc_blk_part_switch(card, md);
3130 mmc_release_host(card->host);
3131 mmc_blk_remove_req(md);
3132 mmc_set_drvdata(card, NULL);
3133 #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
3134 mmc_set_bus_resume_policy(card->host, 0);
3135 #endif
3136 }
3137
3138 #ifdef CONFIG_PM
3139 static int mmc_blk_suspend(struct mmc_card *card)
3140 {
3141 struct mmc_blk_data *part_md;
3142 struct mmc_blk_data *md = mmc_get_drvdata(card);
3143
3144 if (md) {
3145 mmc_queue_suspend(&md->queue);
3146 list_for_each_entry(part_md, &md->part, part) {
3147 mmc_queue_suspend(&part_md->queue);
3148 }
3149 }
3150 return 0;
3151 }
3152
3153 static int mmc_blk_resume(struct mmc_card *card)
3154 {
3155 struct mmc_blk_data *part_md;
3156 struct mmc_blk_data *md = mmc_get_drvdata(card);
3157
3158 if (md) {
3159 /*
3160 * Resume involves the card going into idle state,
3161 * so current partition is always the main one.
3162 */
3163 md->part_curr = md->part_type;
3164 mmc_queue_resume(&md->queue);
3165 list_for_each_entry(part_md, &md->part, part) {
3166 mmc_queue_resume(&part_md->queue);
3167 }
3168 }
3169 return 0;
3170 }
3171 #else
3172 #define mmc_blk_suspend NULL
3173 #define mmc_blk_resume NULL
3174 #endif
3175
3176 static struct mmc_driver mmc_driver = {
3177 .drv = {
3178 .name = "mmcblk",
3179 },
3180 .probe = mmc_blk_probe,
3181 .remove = mmc_blk_remove,
3182 .suspend = mmc_blk_suspend,
3183 .resume = mmc_blk_resume,
3184 };
3185
3186 static int __init mmc_blk_init(void)
3187 {
3188 int res;
3189
3190 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
3191 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
3192
3193 max_devices = 256 / perdev_minors;
3194
3195 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
3196 if (res)
3197 goto out;
3198
3199 res = mmc_register_driver(&mmc_driver);
3200 if (res)
3201 goto out2;
3202
3203 return 0;
3204 out2:
3205 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
3206 out:
3207 return res;
3208 }
3209
3210 static void __exit mmc_blk_exit(void)
3211 {
3212 mmc_unregister_driver(&mmc_driver);
3213 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
3214 }
3215
3216 module_init(mmc_blk_init);
3217 module_exit(mmc_blk_exit);
3218
3219 MODULE_LICENSE("GPL");
3220 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
3221