mmc: card: Fixup request missing in mmc_blk_issue_rw_rq
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers/mmc/card/block.c
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>

#include "queue.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */

#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
				  (req->cmd_flags & REQ_META)) && \
				  (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	name_idx;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with mmc_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};

static DEFINE_MUTEX(open_lock);

enum {
	MMC_PACKED_NR_IDX = -1,
	MMC_PACKED_NR_ZERO,
	MMC_PACKED_NR_SINGLE,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);

static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
	struct mmc_packed *packed = mqrq->packed;

	BUG_ON(!packed);

	mqrq->cmd_type = MMC_PACKED_NONE;
	packed->nr_entries = MMC_PACKED_NR_ZERO;
	packed->idx_failure = MMC_PACKED_NR_IDX;
	packed->retries = 0;
	packed->blocks = 0;
}

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devmaj = MAJOR(disk_devt(disk));
	int devidx = MINOR(disk_devt(disk)) / perdev_minors;

	if (!devmaj)
		devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;

	mmc_claim_host(card->host);

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				card->ext_csd.boot_ro_lock |
				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

	mmc_release_host(card->host);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes)
		return idata;

	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
					idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = get_card_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
				(R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}

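/*
 * MMC_IOC_CMD handler: copy the user-supplied command descriptor and data
 * buffer, claim the host, switch to the partition backing this block device,
 * issue the request, then copy the response (and any read data) back to user
 * space.  RPMB accesses additionally set the block count up front and poll
 * CMD13 afterwards so the programming operation is known to have finished.
 */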
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
	struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;
	int is_rpmb = false;
	u32 status = 0;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	mmc_claim_host(card->host);

	err = mmc_blk_part_switch(card, md);
	if (err)
		goto cmd_rel_host;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			goto cmd_rel_host;
	}

	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
			idata->ic.write_flag & (1 << 31));
		if (err)
			goto cmd_rel_host;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
						__func__, cmd.error);
		err = cmd.error;
		goto cmd_rel_host;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
						__func__, data.error);
		err = data.error;
		goto cmd_rel_host;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
		err = -EFAULT;
		goto cmd_rel_host;
	}

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
						idata->buf, idata->buf_bytes)) {
			err = -EFAULT;
			goto cmd_rel_host;
		}
	}

	if (is_rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
					"%s: Card Status=0x%08X, error %d\n",
					__func__, status, err);
	}

cmd_rel_host:
	mmc_release_host(card->host);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	int ret = -EINVAL;
	if (cmd == MMC_IOC_CMD)
		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
	return ret;
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};

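/*
 * Select the eMMC hardware partition (user area, boot or RPMB) that backs
 * the given mmc_blk_data by updating the access bits of PART_CONFIG in the
 * EXT_CSD.  The currently selected partition is tracked in the main
 * mmc_blk_data so redundant switches are skipped.
 */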
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = mmc_get_drvdata(card);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret)
			return ret;

		card->ext_csd.part_config = part_config;
	}

	main_md->part_curr = md->part_type;
	return 0;
}

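/*
 * Ask an SD card how many blocks of the previous write actually reached the
 * medium (ACMD22, SEND_NUM_WR_BLOCKS).  The error path uses this to complete
 * the known-good part of a failed request.  Returns (u32)-1 on any failure.
 */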
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}

static int send_stop(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid)
			return ERR_RETRY;

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
			return ERR_RETRY;

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;

	/* Flag General errors */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
		if ((status & R1_ERROR) ||
			(brq->stop.resp[0] & R1_ERROR)) {
			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0], status);
			*gen_err = 1;
		}

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card, &stop_status);
		if (err)
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);

		/*
		 * If the stop cmd also timed out, the card is probably
		 * not present, so abort.  Other errors are bad news too.
		 */
		if (err)
			return ERR_ABORT;
		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
			if (stop_status & R1_ERROR) {
				pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
				       req->rq_disk->disk_name, __func__,
				       stop_status);
				*gen_err = 1;
			}
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}

static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}

int mmc_access_rpmb(struct mmc_queue *mq)
{
	struct mmc_blk_data *md = mq->data;
	/*
	 * If this is an RPMB partition access, return true
	 */
	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
		return true;

	return false;
}

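/*
 * Discard handling: map a block-layer discard to the best erase-class
 * operation the card advertises (DISCARD, TRIM or plain ERASE), applying the
 * iNAND CMD38 quirk where required.  A failed erase triggers one
 * reset-and-retry cycle before the request is completed with an error.
 */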
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg, trim_arg, erase_arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	/* The sanitize operation is supported at v4.5 only */
	if (mmc_can_sanitize(card)) {
		erase_arg = MMC_ERASE_ARG;
		trim_arg = MMC_TRIM_ARG;
	} else {
		erase_arg = MMC_SECURE_ERASE_ARG;
		trim_arg = MMC_SECURE_TRIM1_ARG;
	}

	if (mmc_erase_group_aligned(card, from, nr))
		arg = erase_arg;
	else if (mmc_can_trim(card))
		arg = trim_arg;
	else {
		err = -EINVAL;
		goto out;
	}
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err)
		goto out;

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err)
			goto out;
	}

	if (mmc_can_sanitize(card))
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_SANITIZE_START, 1, 0);
out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	blk_end_request_all(req, ret);

	return ret ? 0 : 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}

#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

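/*
 * Post-completion status check for an asynchronous transfer.  Returns one of
 * the MMC_BLK_* status codes: SUCCESS/PARTIAL when data was transferred,
 * otherwise RETRY, ABORT, CMD_ERR, DATA_ERR, ECC_ERR or NOMEDIUM so the
 * caller can apply the matching recovery policy.  Writes are additionally
 * polled with CMD13 until the card leaves the programming state or
 * MMC_BLK_TIMEOUT_MS expires.
 */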
static int mmc_blk_err_check(struct mmc_card *card,
			     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int ecc_err = 0, gen_err = 0;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, which we have to wait for to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		u32 status;
		unsigned long timeout;

		/* Check stop command response */
		if (brq->stop.resp[0] & R1_ERROR) {
			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0]);
			gen_err = 1;
		}

		timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
		do {
			int err = get_card_status(card, &status, 5);
			if (err) {
				pr_err("%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
				return MMC_BLK_CMD_ERR;
			}

			if (status & R1_ERROR) {
				pr_err("%s: %s: general error sending status command, card status %#x\n",
				       req->rq_disk->disk_name, __func__,
				       status);
				gen_err = 1;
			}

			/* Timeout if the device never becomes ready for data
			 * and never leaves the program state.
			 */
			if (time_after(jiffies, timeout)) {
				pr_err("%s: Card stuck in programming state!"\
					" %s %s\n", mmc_hostname(card->host),
					req->rq_disk->disk_name, __func__);

				return MMC_BLK_CMD_ERR;
			}
			/*
			 * Some cards mishandle the status bits,
			 * so make sure to check both the busy
			 * indication and the card state.
			 */
		} while (!(status & R1_READY_FOR_DATA) ||
			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
	}

	/* if general error occurs, retry the write operation. */
	if (gen_err) {
		pr_warn("%s: retrying write for general error\n",
				req->rq_disk->disk_name);
		return MMC_BLK_RETRY;
	}

	if (brq->data.error) {
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
			return MMC_BLK_PARTIAL;
		else
			return MMC_BLK_SUCCESS;
	}

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}

static int mmc_blk_packed_err_check(struct mmc_card *card,
				    struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
			mmc_active);
	struct request *req = mq_rq->req;
	struct mmc_packed *packed = mq_rq->packed;
	int err, check, status;
	u8 *ext_csd;

	BUG_ON(!packed);

	packed->retries--;
	check = mmc_blk_err_check(card, areq);
	err = get_card_status(card, &status, 0);
	if (err) {
		pr_err("%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
		return MMC_BLK_ABORT;
	}

	if (status & R1_EXCEPTION_EVENT) {
		ext_csd = kzalloc(512, GFP_KERNEL);
		if (!ext_csd) {
			pr_err("%s: unable to allocate buffer for ext_csd\n",
			       req->rq_disk->disk_name);
			return -ENOMEM;
		}

		err = mmc_send_ext_csd(card, ext_csd);
		if (err) {
			pr_err("%s: error %d sending ext_csd\n",
			       req->rq_disk->disk_name, err);
			check = MMC_BLK_ABORT;
			goto free;
		}

		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
		     EXT_CSD_PACKED_FAILURE) &&
		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
		     EXT_CSD_PACKED_GENERIC_ERROR)) {
			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
			    EXT_CSD_PACKED_INDEXED_ERROR) {
				packed->idx_failure =
				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
				check = MMC_BLK_PARTIAL;
			}
			pr_err("%s: packed cmd failed, nr %u, sectors %u, "
			       "failure index: %d\n",
			       req->rq_disk->disk_name, packed->nr_entries,
			       packed->blocks, packed->idx_failure);
		}
free:
		kfree(ext_csd);
	}

	return check;
}

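/*
 * Build the mmc_blk_request (CMD23/CMD17/CMD18/CMD24/CMD25 plus stop command)
 * for a single block-layer read or write, applying the reliable-write,
 * data-tag and single-block fallback rules, and hook it up as an
 * asynchronous request with mmc_blk_err_check() as its completion check.
 */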
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->data;
	bool do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * REQ_META accesses, and are supported only on MMCs.
	 *
	 * XXX: this really needs a good explanation of why REQ_META
	 * is treated special.
	 */
	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/* Some controllers can't do multiblock reads due to hw bugs */
		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
		    rq_data_dir(req) == READ)
			brq->data.blocks = 1;
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags |= MMC_DATA_READ;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags |= MMC_DATA_WRITE;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		(req->cmd_flags & REQ_META) &&
		(rq_data_dir(req) == WRITE) &&
		((brq->data.blocks * brq->data.blksz) >=
		 card->ext_csd.data_tag_unit_size);

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open ended-ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}

static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
					  struct mmc_card *card)
{
	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
	unsigned int max_seg_sz = queue_max_segment_size(q);
	unsigned int len, nr_segs = 0;

	do {
		len = min(hdr_sz, max_seg_sz);
		hdr_sz -= len;
		nr_segs++;
	} while (hdr_sz);

	return nr_segs;
}

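/*
 * Try to batch consecutive write requests from the queue into one eMMC
 * packed WRITE.  Requests are pulled off the queue while they stay within
 * the card/host limits on sectors and segments; the first request that does
 * not fit is requeued.  Returns the number of packed entries, or 0 when
 * packing is not possible.
 */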
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;
	struct request *cur = req, *next = NULL;
	struct mmc_blk_data *md = mq->data;
	struct mmc_queue_req *mqrq = mq->mqrq_cur;
	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
	unsigned int req_sectors = 0, phys_segments = 0;
	unsigned int max_blk_count, max_phys_segs;
	bool put_back = true;
	u8 max_packed_rw = 0;
	u8 reqs = 0;

	if (!(md->flags & MMC_BLK_PACKED_CMD))
		goto no_packed;

	if ((rq_data_dir(cur) == WRITE) &&
	    mmc_host_packed_wr(card->host))
		max_packed_rw = card->ext_csd.max_packed_writes;

	if (max_packed_rw == 0)
		goto no_packed;

	if (mmc_req_rel_wr(cur) &&
	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
		goto no_packed;

	if (mmc_large_sector(card) &&
	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
		goto no_packed;

	mmc_blk_clear_packed(mqrq);

	max_blk_count = min(card->host->max_blk_count,
			    card->host->max_req_size >> 9);
	if (unlikely(max_blk_count > 0xffff))
		max_blk_count = 0xffff;

	max_phys_segs = queue_max_segments(q);
	req_sectors += blk_rq_sectors(cur);
	phys_segments += cur->nr_phys_segments;

	if (rq_data_dir(cur) == WRITE) {
		req_sectors += mmc_large_sector(card) ? 8 : 1;
		phys_segments += mmc_calc_packed_hdr_segs(q, card);
	}

	do {
		if (reqs >= max_packed_rw - 1) {
			put_back = false;
			break;
		}

		spin_lock_irq(q->queue_lock);
		next = blk_fetch_request(q);
		spin_unlock_irq(q->queue_lock);
		if (!next) {
			put_back = false;
			break;
		}

		if (mmc_large_sector(card) &&
		    !IS_ALIGNED(blk_rq_sectors(next), 8))
			break;

		if (next->cmd_flags & REQ_DISCARD ||
		    next->cmd_flags & REQ_FLUSH)
			break;

		if (rq_data_dir(cur) != rq_data_dir(next))
			break;

		if (mmc_req_rel_wr(next) &&
		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
			break;

		req_sectors += blk_rq_sectors(next);
		if (req_sectors > max_blk_count)
			break;

		phys_segments += next->nr_phys_segments;
		if (phys_segments > max_phys_segs)
			break;

		list_add_tail(&next->queuelist, &mqrq->packed->list);
		cur = next;
		reqs++;
	} while (1);

	if (put_back) {
		spin_lock_irq(q->queue_lock);
		blk_requeue_request(q, next);
		spin_unlock_irq(q->queue_lock);
	}

	if (reqs > 0) {
		list_add(&req->queuelist, &mqrq->packed->list);
		mqrq->packed->nr_entries = ++reqs;
		mqrq->packed->retries = reqs;
		return reqs;
	}

no_packed:
	mqrq->cmd_type = MMC_PACKED_NONE;
	return 0;
}

static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
					struct mmc_card *card,
					struct mmc_queue *mq)
{
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct request *prq;
	struct mmc_blk_data *md = mq->data;
	struct mmc_packed *packed = mqrq->packed;
	bool do_rel_wr, do_data_tag;
	u32 *packed_cmd_hdr;
	u8 hdr_blocks;
	u8 i = 1;

	BUG_ON(!packed);

	mqrq->cmd_type = MMC_PACKED_WRITE;
	packed->blocks = 0;
	packed->idx_failure = MMC_PACKED_NR_IDX;

	packed_cmd_hdr = packed->cmd_hdr;
	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
	hdr_blocks = mmc_large_sector(card) ? 8 : 1;

	/*
	 * Argument for each entry of packed group
	 */
	list_for_each_entry(prq, &packed->list, queuelist) {
		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
			(prq->cmd_flags & REQ_META) &&
			(rq_data_dir(prq) == WRITE) &&
			((brq->data.blocks * brq->data.blksz) >=
			 card->ext_csd.data_tag_unit_size);
		/* Argument of CMD23 */
		packed_cmd_hdr[(i * 2)] =
			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
			blk_rq_sectors(prq);
		/* Argument of CMD18 or CMD25 */
		packed_cmd_hdr[((i * 2)) + 1] =
			mmc_card_blockaddr(card) ?
			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
		packed->blocks += blk_rq_sectors(prq);
		i++;
	}

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;
	brq->mrq.sbc = &brq->sbc;
	brq->mrq.stop = &brq->stop;

	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	brq->data.blksz = 512;
	brq->data.blocks = packed->blocks + hdr_blocks;
	brq->data.flags |= MMC_DATA_WRITE;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;

	mmc_queue_bounce_pre(mqrq);
}

static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			   struct mmc_blk_request *brq, struct request *req,
			   int ret)
{
	struct mmc_queue_req *mq_rq;
	mq_rq = container_of(brq, struct mmc_queue_req, brq);

	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			ret = blk_end_request(req, 0, blocks << 9);
		}
	} else {
		if (!mmc_packed_cmd(mq_rq->cmd_type))
			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
	}
	return ret;
}

static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;
	int idx = packed->idx_failure, i = 0;
	int ret = 0;

	BUG_ON(!packed);

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		if (idx == i) {
			/* retry from error index */
			packed->nr_entries -= idx;
			mq_rq->req = prq;
			ret = 1;

			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
				list_del_init(&prq->queuelist);
				mmc_blk_clear_packed(mq_rq);
			}
			return ret;
		}
		list_del_init(&prq->queuelist);
		blk_end_request(prq, 0, blk_rq_bytes(prq));
		i++;
	}

	mmc_blk_clear_packed(mq_rq);
	return ret;
}

static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;

	BUG_ON(!packed);

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		list_del_init(&prq->queuelist);
		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
	}

	mmc_blk_clear_packed(mq_rq);
}

static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
				      struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct request_queue *q = mq->queue;
	struct mmc_packed *packed = mq_rq->packed;

	BUG_ON(!packed);

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.prev);
		if (prq->queuelist.prev != &packed->list) {
			list_del_init(&prq->queuelist);
			spin_lock_irq(q->queue_lock);
			blk_requeue_request(mq->queue, prq);
			spin_unlock_irq(q->queue_lock);
		} else {
			list_del_init(&prq->queuelist);
		}
	}

	mmc_blk_clear_packed(mq_rq);
}

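/*
 * Main read/write issue path.  New requests are prepared and handed to
 * mmc_start_req() so the next transfer can be set up while the previous one
 * completes; the switch below then applies the per-status recovery policy
 * (retry, reset, single-block fallback or abort) until the whole request has
 * been completed.
 */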
ee8a43a5 1753static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
54d49d77
PF
1754{
1755 struct mmc_blk_data *md = mq->data;
1756 struct mmc_card *card = md->queue.card;
1757 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
67716327 1758 int ret = 1, disable_multi = 0, retry = 0, type;
d78d4a8a 1759 enum mmc_blk_status status;
ee8a43a5 1760 struct mmc_queue_req *mq_rq;
a5075eb9 1761 struct request *req = rqc;
ee8a43a5 1762 struct mmc_async_req *areq;
ce39f9d1
SJ
1763 const u8 packed_nr = 2;
1764 u8 reqs = 0;
1da177e4 1765
ee8a43a5
PF
1766 if (!rqc && !mq->mqrq_prev->req)
1767 return 0;
98ccf149 1768
ce39f9d1
SJ
1769 if (rqc)
1770 reqs = mmc_blk_prep_packed_list(mq, rqc);
1771
ee8a43a5
PF
1772 do {
1773 if (rqc) {
a5075eb9
SD
1774 /*
1775 * When 4KB native sector is enabled, only 8 blocks
1776 * multiple read or write is allowed
1777 */
1778 if ((brq->data.blocks & 0x07) &&
1779 (card->ext_csd.data_sector_size == 4096)) {
1780 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
1781 req->rq_disk->disk_name);
ce39f9d1 1782 mq_rq = mq->mqrq_cur;
a5075eb9
SD
1783 goto cmd_abort;
1784 }
ce39f9d1
SJ
1785
1786 if (reqs >= packed_nr)
1787 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
1788 card, mq);
1789 else
1790 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
ee8a43a5
PF
1791 areq = &mq->mqrq_cur->mmc_active;
1792 } else
1793 areq = NULL;
1794 areq = mmc_start_req(card->host, areq, (int *) &status);
2220eedf
KD
1795 if (!areq) {
1796 if (status == MMC_BLK_NEW_REQUEST)
1797 mq->flags |= MMC_QUEUE_NEW_REQUEST;
ee8a43a5 1798 return 0;
2220eedf 1799 }
ee8a43a5
PF
1800
1801 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1802 brq = &mq_rq->brq;
1803 req = mq_rq->req;
67716327 1804 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
ee8a43a5 1805 mmc_queue_bounce_post(mq_rq);
98ccf149 1806
d78d4a8a
PF
1807 switch (status) {
1808 case MMC_BLK_SUCCESS:
1809 case MMC_BLK_PARTIAL:
1810 /*
1811 * A block was successfully transferred.
1812 */
67716327 1813 mmc_blk_reset_success(md, type);
ce39f9d1
SJ
1814
1815 if (mmc_packed_cmd(mq_rq->cmd_type)) {
1816 ret = mmc_blk_end_packed_req(mq_rq);
1817 break;
1818 } else {
1819 ret = blk_end_request(req, 0,
d78d4a8a 1820 brq->data.bytes_xfered);
ce39f9d1
SJ
1821 }
1822
67716327
AH
1823 /*
1824 * If the blk_end_request function returns non-zero even
1825 * though all data has been transferred and no errors
1826 * were returned by the host controller, it's a bug.
1827 */
ee8a43a5 1828 if (status == MMC_BLK_SUCCESS && ret) {
a3c76eb9 1829 pr_err("%s BUG rq_tot %d d_xfer %d\n",
ee8a43a5
PF
1830 __func__, blk_rq_bytes(req),
1831 brq->data.bytes_xfered);
1832 rqc = NULL;
1833 goto cmd_abort;
1834 }
d78d4a8a
PF
1835 break;
1836 case MMC_BLK_CMD_ERR:
67716327 1837 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
21dd5b3d
DW
1838 if (mmc_blk_reset(md, card->host, type))
1839 goto cmd_abort;
1840 if (!ret)
1841 goto start_new_req;
1842 break;
d78d4a8a
PF
1843 case MMC_BLK_RETRY:
1844 if (retry++ < 5)
a01f3ccf 1845 break;
67716327 1846 /* Fall through */
d78d4a8a 1847 case MMC_BLK_ABORT:
67716327
AH
1848 if (!mmc_blk_reset(md, card->host, type))
1849 break;
4c2b8f26 1850 goto cmd_abort;
67716327
AH
1851 case MMC_BLK_DATA_ERR: {
1852 int err;
1853
1854 err = mmc_blk_reset(md, card->host, type);
1855 if (!err)
1856 break;
ce39f9d1
SJ
1857 if (err == -ENODEV ||
1858 mmc_packed_cmd(mq_rq->cmd_type))
67716327
AH
1859 goto cmd_abort;
1860 /* Fall through */
1861 }
1862 case MMC_BLK_ECC_ERR:
1863 if (brq->data.blocks > 1) {
1864 /* Redo read one sector at a time */
1865 pr_warning("%s: retrying using single block read\n",
1866 req->rq_disk->disk_name);
1867 disable_multi = 1;
1868 break;
1869 }
d78d4a8a
PF
1870 /*
1871 * After an error, we redo I/O one sector at a
1872 * time, so we only reach here after trying to
1873 * read a single sector.
1874 */
ecf8b5d0 1875 ret = blk_end_request(req, -EIO,
d78d4a8a 1876 brq->data.blksz);
ee8a43a5
PF
1877 if (!ret)
1878 goto start_new_req;
d78d4a8a 1879 break;
a8ad82cc
SRT
1880 case MMC_BLK_NOMEDIUM:
1881 goto cmd_abort;
2220eedf
KD
1882 default:
1883 pr_err("%s: Unhandled return value (%d)",
1884 req->rq_disk->disk_name, status);
1885 goto cmd_abort;
4c2b8f26
RKAL
1886 }
1887
ee8a43a5 1888 if (ret) {
ce39f9d1
SJ
1889 if (mmc_packed_cmd(mq_rq->cmd_type)) {
1890 if (!mq_rq->packed->retries)
1891 goto cmd_abort;
1892 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
1893 mmc_start_req(card->host,
1894 &mq_rq->mmc_active, NULL);
1895 } else {
1896
1897 /*
 1898 * In case of an incomplete request,
 1899 * prepare it again and resend it.
1900 */
1901 mmc_blk_rw_rq_prep(mq_rq, card,
1902 disable_multi, mq);
1903 mmc_start_req(card->host,
1904 &mq_rq->mmc_active, NULL);
1905 }
ee8a43a5 1906 }
1da177e4
LT
1907 } while (ret);
1908
1da177e4
LT
1909 return 1;
1910
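	/*
	 * Fatal error: abort the packed request, or fail the remainder of
	 * the current request with -EIO (quietly if the card has been
	 * removed). If another request was queued, it is started below.
	 */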
a01f3ccf 1911 cmd_abort:
ce39f9d1
SJ
1912 if (mmc_packed_cmd(mq_rq->cmd_type)) {
1913 mmc_blk_abort_packed_req(mq_rq);
1914 } else {
1915 if (mmc_card_removed(card))
1916 req->cmd_flags |= REQ_QUIET;
1917 while (ret)
1918 ret = blk_end_request(req, -EIO,
1919 blk_rq_cur_bytes(req));
1920 }
1da177e4 1921
ee8a43a5
PF
1922 start_new_req:
1923 if (rqc) {
7a81902f
SJ
1924 if (mmc_card_removed(card)) {
1925 rqc->cmd_flags |= REQ_QUIET;
1926 blk_end_request_all(rqc, -EIO);
1927 } else {
ce39f9d1
SJ
1928 /*
 1929 * If the current request is packed, it needs to be put back.
1930 */
1931 if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
1932 mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
1933
7a81902f
SJ
1934 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1935 mmc_start_req(card->host,
1936 &mq->mqrq_cur->mmc_active, NULL);
1937 }
ee8a43a5
PF
1938 }
1939
1da177e4
LT
1940 return 0;
1941}
1942
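/*
 * Top-level issue function called from the block queue: switch to the
 * right hardware partition, then dispatch the request to the discard,
 * secure-discard, flush or read/write handlers. The host is claimed for
 * the first request of a burst and released again once the queue is
 * empty or a special (discard/flush) request has completed.
 */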
bd788c96
AH
1943static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1944{
1a258db6
AW
1945 int ret;
1946 struct mmc_blk_data *md = mq->data;
1947 struct mmc_card *card = md->queue.card;
2220eedf
KD
1948 struct mmc_host *host = card->host;
1949 unsigned long flags;
1e06335d 1950 unsigned int cmd_flags = req ? req->cmd_flags : 0;
1a258db6 1951
ee8a43a5
PF
1952 if (req && !mq->mqrq_prev->req)
1953 /* claim host only for the first request */
1954 mmc_claim_host(card->host);
1955
371a689f
AW
1956 ret = mmc_blk_part_switch(card, md);
1957 if (ret) {
0d7d85ca 1958 if (req) {
ecf8b5d0 1959 blk_end_request_all(req, -EIO);
0d7d85ca 1960 }
371a689f
AW
1961 ret = 0;
1962 goto out;
1963 }
1a258db6 1964
2220eedf 1965 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
1e06335d 1966 if (cmd_flags & REQ_DISCARD) {
ee8a43a5
PF
1967 /* complete ongoing async transfer before issuing discard */
1968 if (card->host->areq)
1969 mmc_blk_issue_rw_rq(mq, NULL);
3550ccdb
IC
1970 if (req->cmd_flags & REQ_SECURE &&
1971 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
1a258db6 1972 ret = mmc_blk_issue_secdiscard_rq(mq, req);
49804548 1973 else
1a258db6 1974 ret = mmc_blk_issue_discard_rq(mq, req);
1e06335d 1975 } else if (cmd_flags & REQ_FLUSH) {
393f9a08
JC
1976 /* complete ongoing async transfer before issuing flush */
1977 if (card->host->areq)
1978 mmc_blk_issue_rw_rq(mq, NULL);
1a258db6 1979 ret = mmc_blk_issue_flush(mq, req);
49804548 1980 } else {
2220eedf
KD
1981 if (!req && host->areq) {
1982 spin_lock_irqsave(&host->context_info.lock, flags);
1983 host->context_info.is_waiting_last_req = true;
1984 spin_unlock_irqrestore(&host->context_info.lock, flags);
1985 }
1a258db6 1986 ret = mmc_blk_issue_rw_rq(mq, req);
49804548 1987 }
1a258db6 1988
371a689f 1989out:
ef3a69c7 1990 if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
1e06335d 1991 (cmd_flags & MMC_REQ_SPECIAL_MASK))
ef3a69c7
SJ
1992 /*
 1993 * Release the host when there are no more requests
 1994 * and after a special request (discard, flush) is done.
 1995 * In case of a special request, there is no reentry to
 1996 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
1997 */
ee8a43a5 1998 mmc_release_host(card->host);
1a258db6 1999 return ret;
bd788c96 2000}
1da177e4 2001
a6f6c96b
RK
2002static inline int mmc_blk_readonly(struct mmc_card *card)
2003{
2004 return mmc_card_readonly(card) ||
2005 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2006}
2007
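/*
 * Allocate and initialise one mmc_blk_data instance: reserve a device
 * index, allocate the gendisk and request queue, set the read-only and
 * logical block size attributes, and enable CMD23, reliable write and
 * packed command support where the card and host allow it.
 */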
371a689f
AW
2008static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2009 struct device *parent,
2010 sector_t size,
2011 bool default_ro,
add710ea
JR
2012 const char *subname,
2013 int area_type)
1da177e4
LT
2014{
2015 struct mmc_blk_data *md;
2016 int devidx, ret;
2017
5e71b7a6
OJ
2018 devidx = find_first_zero_bit(dev_use, max_devices);
2019 if (devidx >= max_devices)
1da177e4
LT
2020 return ERR_PTR(-ENOSPC);
2021 __set_bit(devidx, dev_use);
2022
dd00cc48 2023 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
a6f6c96b
RK
2024 if (!md) {
2025 ret = -ENOMEM;
2026 goto out;
2027 }
1da177e4 2028
f06c9153
AW
2029 /*
 2030 * !subname implies we are creating the main mmc_blk_data that will be
 2031 * associated with the mmc_card via mmc_set_drvdata. Due to device
 2032 * partitions, devidx will no longer coincide with a per-physical-card
 2033 * index, so we keep track of a name index.
2034 */
2035 if (!subname) {
2036 md->name_idx = find_first_zero_bit(name_use, max_devices);
2037 __set_bit(md->name_idx, name_use);
add710ea 2038 } else
f06c9153
AW
2039 md->name_idx = ((struct mmc_blk_data *)
2040 dev_to_disk(parent)->private_data)->name_idx;
2041
add710ea
JR
2042 md->area_type = area_type;
2043
a6f6c96b
RK
2044 /*
2045 * Set the read-only status based on the supported commands
2046 * and the write protect switch.
2047 */
2048 md->read_only = mmc_blk_readonly(card);
1da177e4 2049
5e71b7a6 2050 md->disk = alloc_disk(perdev_minors);
a6f6c96b
RK
2051 if (md->disk == NULL) {
2052 ret = -ENOMEM;
2053 goto err_kfree;
2054 }
1da177e4 2055
a6f6c96b 2056 spin_lock_init(&md->lock);
371a689f 2057 INIT_LIST_HEAD(&md->part);
a6f6c96b 2058 md->usage = 1;
1da177e4 2059
d09408ad 2060 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
a6f6c96b
RK
2061 if (ret)
2062 goto err_putdisk;
1da177e4 2063
a6f6c96b
RK
2064 md->queue.issue_fn = mmc_blk_issue_rq;
2065 md->queue.data = md;
d2b18394 2066
fe6b4c88 2067 md->disk->major = MMC_BLOCK_MAJOR;
5e71b7a6 2068 md->disk->first_minor = devidx * perdev_minors;
a6f6c96b
RK
2069 md->disk->fops = &mmc_bdops;
2070 md->disk->private_data = md;
2071 md->disk->queue = md->queue.queue;
371a689f
AW
2072 md->disk->driverfs_dev = parent;
2073 set_disk_ro(md->disk, md->read_only || default_ro);
53d8f974
LP
2074 if (area_type & MMC_BLK_DATA_AREA_RPMB)
2075 md->disk->flags |= GENHD_FL_NO_PART_SCAN;
a6f6c96b
RK
2076
2077 /*
2078 * As discussed on lkml, GENHD_FL_REMOVABLE should:
2079 *
2080 * - be set for removable media with permanent block devices
2081 * - be unset for removable block devices with permanent media
2082 *
2083 * Since MMC block devices clearly fall under the second
2084 * case, we do not set GENHD_FL_REMOVABLE. Userspace
2085 * should use the block device creation/destruction hotplug
2086 * messages to tell when the card is present.
2087 */
2088
f06c9153
AW
2089 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2090 "mmcblk%d%s", md->name_idx, subname ? subname : "");
a6f6c96b 2091
a5075eb9
SD
2092 if (mmc_card_mmc(card))
2093 blk_queue_logical_block_size(md->queue.queue,
2094 card->ext_csd.data_sector_size);
2095 else
2096 blk_queue_logical_block_size(md->queue.queue, 512);
2097
371a689f 2098 set_capacity(md->disk, size);
d0c97cfb 2099
f0d89972
AW
2100 if (mmc_host_cmd23(card->host)) {
2101 if (mmc_card_mmc(card) ||
2102 (mmc_card_sd(card) &&
2103 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2104 md->flags |= MMC_BLK_CMD23;
2105 }
d0c97cfb
AW
2106
2107 if (mmc_card_mmc(card) &&
2108 md->flags & MMC_BLK_CMD23 &&
2109 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2110 card->ext_csd.rel_sectors)) {
2111 md->flags |= MMC_BLK_REL_WR;
2112 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
2113 }
2114
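	/*
	 * Packed commands are only enabled for the main data area of
	 * eMMC cards that support CMD23 and have packed command events
	 * enabled (ext_csd.packed_event_en).
	 */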
ce39f9d1
SJ
2115 if (mmc_card_mmc(card) &&
2116 (area_type == MMC_BLK_DATA_AREA_MAIN) &&
2117 (md->flags & MMC_BLK_CMD23) &&
2118 card->ext_csd.packed_event_en) {
2119 if (!mmc_packed_init(&md->queue, card))
2120 md->flags |= MMC_BLK_PACKED_CMD;
2121 }
2122
371a689f
AW
2123 return md;
2124
2125 err_putdisk:
2126 put_disk(md->disk);
2127 err_kfree:
2128 kfree(md);
2129 out:
2130 return ERR_PTR(ret);
2131}
2132
2133static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2134{
2135 sector_t size;
2136 struct mmc_blk_data *md;
a6f6c96b 2137
85a18ad9
PO
2138 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2139 /*
 2140 * The EXT_CSD sector count is in number of 512 byte
2141 * sectors.
2142 */
371a689f 2143 size = card->ext_csd.sectors;
85a18ad9
PO
2144 } else {
2145 /*
2146 * The CSD capacity field is in units of read_blkbits.
2147 * set_capacity takes units of 512 bytes.
2148 */
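		/*
		 * For example, a card reporting read_blkbits == 10
		 * (1024-byte blocks) has its capacity shifted left by
		 * one to express it in 512-byte sectors.
		 */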
371a689f 2149 size = card->csd.capacity << (card->csd.read_blkbits - 9);
85a18ad9 2150 }
371a689f 2151
add710ea
JR
2152 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2153 MMC_BLK_DATA_AREA_MAIN);
1da177e4 2154 return md;
371a689f 2155}
a6f6c96b 2156
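/*
 * Allocate a block device for a single hardware partition (boot, general
 * purpose or RPMB area), record its partition type and link it into the
 * parent device's partition list.
 */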
371a689f
AW
2157static int mmc_blk_alloc_part(struct mmc_card *card,
2158 struct mmc_blk_data *md,
2159 unsigned int part_type,
2160 sector_t size,
2161 bool default_ro,
add710ea
JR
2162 const char *subname,
2163 int area_type)
371a689f
AW
2164{
2165 char cap_str[10];
2166 struct mmc_blk_data *part_md;
2167
2168 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
add710ea 2169 subname, area_type);
371a689f
AW
2170 if (IS_ERR(part_md))
2171 return PTR_ERR(part_md);
2172 part_md->part_type = part_type;
2173 list_add(&part_md->part, &md->part);
2174
2175 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
2176 cap_str, sizeof(cap_str));
a3c76eb9 2177 pr_info("%s: %s %s partition %u %s\n",
371a689f
AW
2178 part_md->disk->disk_name, mmc_card_id(card),
2179 mmc_card_name(card), part_md->part_type, cap_str);
2180 return 0;
2181}
2182
e0c368d5
NJ
2183/* MMC physical partitions consist of two boot partitions and
2184 * up to four general purpose partitions.
2185 * For each partition enabled in EXT_CSD a block device will be allocated
2186 * to provide access to the partition.
2187 */
2188
371a689f
AW
2189static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2190{
e0c368d5 2191 int idx, ret = 0;
371a689f
AW
2192
2193 if (!mmc_card_mmc(card))
2194 return 0;
2195
e0c368d5
NJ
2196 for (idx = 0; idx < card->nr_parts; idx++) {
2197 if (card->part[idx].size) {
2198 ret = mmc_blk_alloc_part(card, md,
2199 card->part[idx].part_cfg,
2200 card->part[idx].size >> 9,
2201 card->part[idx].force_ro,
add710ea
JR
2202 card->part[idx].name,
2203 card->part[idx].area_type);
e0c368d5
NJ
2204 if (ret)
2205 return ret;
2206 }
371a689f
AW
2207 }
2208
2209 return ret;
1da177e4
LT
2210}
2211
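/*
 * Tear down one block device: remove its sysfs attributes, delete the
 * gendisk so no new requests can arrive, flush and clean up the request
 * queue, and drop the final reference on the mmc_blk_data.
 */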
371a689f
AW
2212static void mmc_blk_remove_req(struct mmc_blk_data *md)
2213{
add710ea
JR
2214 struct mmc_card *card;
2215
371a689f 2216 if (md) {
add710ea 2217 card = md->queue.card;
371a689f
AW
2218 if (md->disk->flags & GENHD_FL_UP) {
2219 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
add710ea
JR
2220 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2221 card->ext_csd.boot_ro_lockable)
2222 device_remove_file(disk_to_dev(md->disk),
2223 &md->power_ro_lock);
371a689f
AW
2224
2225 /* Stop new requests from getting into the queue */
2226 del_gendisk(md->disk);
2227 }
2228
2229 /* Then flush out any already in there */
2230 mmc_cleanup_queue(&md->queue);
ce39f9d1
SJ
2231 if (md->flags & MMC_BLK_PACKED_CMD)
2232 mmc_packed_clean(&md->queue);
371a689f
AW
2233 mmc_blk_put(md);
2234 }
2235}
2236
2237static void mmc_blk_remove_parts(struct mmc_card *card,
2238 struct mmc_blk_data *md)
2239{
2240 struct list_head *pos, *q;
2241 struct mmc_blk_data *part_md;
2242
f06c9153 2243 __clear_bit(md->name_idx, name_use);
371a689f
AW
2244 list_for_each_safe(pos, q, &md->part) {
2245 part_md = list_entry(pos, struct mmc_blk_data, part);
2246 list_del(pos);
2247 mmc_blk_remove_req(part_md);
2248 }
2249}
2250
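/*
 * Register the gendisk and create the per-disk sysfs attributes:
 * "force_ro" for all devices, and "ro_lock_until_next_power_on" for
 * boot areas that support power-on write protection.
 */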
2251static int mmc_add_disk(struct mmc_blk_data *md)
2252{
2253 int ret;
add710ea 2254 struct mmc_card *card = md->queue.card;
371a689f
AW
2255
2256 add_disk(md->disk);
2257 md->force_ro.show = force_ro_show;
2258 md->force_ro.store = force_ro_store;
641c3187 2259 sysfs_attr_init(&md->force_ro.attr);
371a689f
AW
2260 md->force_ro.attr.name = "force_ro";
2261 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
2262 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
2263 if (ret)
add710ea
JR
2264 goto force_ro_fail;
2265
2266 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
2267 card->ext_csd.boot_ro_lockable) {
88187398 2268 umode_t mode;
add710ea
JR
2269
2270 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
2271 mode = S_IRUGO;
2272 else
2273 mode = S_IRUGO | S_IWUSR;
2274
2275 md->power_ro_lock.show = power_ro_lock_show;
2276 md->power_ro_lock.store = power_ro_lock_store;
00d9ac08 2277 sysfs_attr_init(&md->power_ro_lock.attr);
add710ea
JR
2278 md->power_ro_lock.attr.mode = mode;
2279 md->power_ro_lock.attr.name =
2280 "ro_lock_until_next_power_on";
2281 ret = device_create_file(disk_to_dev(md->disk),
2282 &md->power_ro_lock);
2283 if (ret)
2284 goto power_ro_lock_fail;
2285 }
2286 return ret;
2287
2288power_ro_lock_fail:
2289 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
2290force_ro_fail:
2291 del_gendisk(md->disk);
371a689f
AW
2292
2293 return ret;
2294}
2295
c59d4473
CB
2296#define CID_MANFID_SANDISK 0x2
2297#define CID_MANFID_TOSHIBA 0x11
2298#define CID_MANFID_MICRON 0x13
3550ccdb 2299#define CID_MANFID_SAMSUNG 0x15
c59d4473 2300
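/*
 * Card-specific quirks, matched against the CID (product name,
 * manufacturer and OEM IDs) when a card is probed.
 */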
6f60c222
AW
2301static const struct mmc_fixup blk_fixups[] =
2302{
c59d4473
CB
2303 MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
2304 MMC_QUIRK_INAND_CMD38),
2305 MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
2306 MMC_QUIRK_INAND_CMD38),
2307 MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
2308 MMC_QUIRK_INAND_CMD38),
2309 MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
2310 MMC_QUIRK_INAND_CMD38),
2311 MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
2312 MMC_QUIRK_INAND_CMD38),
d0c97cfb
AW
2313
2314 /*
2315 * Some MMC cards experience performance degradation with CMD23
2316 * instead of CMD12-bounded multiblock transfers. For now we'll
2317 * black list what's bad...
2318 * - Certain Toshiba cards.
2319 *
2320 * N.B. This doesn't affect SD cards.
2321 */
c59d4473 2322 MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
d0c97cfb 2323 MMC_QUIRK_BLK_NO_CMD23),
c59d4473 2324 MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
d0c97cfb 2325 MMC_QUIRK_BLK_NO_CMD23),
c59d4473 2326 MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
d0c97cfb 2327 MMC_QUIRK_BLK_NO_CMD23),
6de5fc9c
SNX
2328
2329 /*
 2330 * Some Micron MMC cards need a longer data read timeout than
2331 * indicated in CSD.
2332 */
c59d4473 2333 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
6de5fc9c
SNX
2334 MMC_QUIRK_LONG_READ_TIME),
2335
3550ccdb
IC
2336 /*
2337 * On these Samsung MoviNAND parts, performing secure erase or
2338 * secure trim can result in unrecoverable corruption due to a
2339 * firmware bug.
2340 */
2341 MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2342 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2343 MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2344 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2345 MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2346 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2347 MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2348 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2349 MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2350 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2351 MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2352 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2353 MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2354 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2355 MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
2356 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
2357
6f60c222
AW
2358 END_FIXUP
2359};
2360
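/*
 * Probe a newly inserted card: allocate the main block device and any
 * hardware partitions, apply card-specific quirks, and register the
 * resulting disks with the block layer.
 */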
1da177e4
LT
2361static int mmc_blk_probe(struct mmc_card *card)
2362{
371a689f 2363 struct mmc_blk_data *md, *part_md;
a7bbb573
PO
2364 char cap_str[10];
2365
912490db
PO
2366 /*
2367 * Check that the card supports the command class(es) we need.
2368 */
2369 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
1da177e4
LT
2370 return -ENODEV;
2371
1da177e4
LT
2372 md = mmc_blk_alloc(card);
2373 if (IS_ERR(md))
2374 return PTR_ERR(md);
2375
444122fd 2376 string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
a7bbb573 2377 cap_str, sizeof(cap_str));
a3c76eb9 2378 pr_info("%s: %s %s %s %s\n",
1da177e4 2379 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
a7bbb573 2380 cap_str, md->read_only ? "(ro)" : "");
1da177e4 2381
371a689f
AW
2382 if (mmc_blk_alloc_parts(card, md))
2383 goto out;
2384
1da177e4 2385 mmc_set_drvdata(card, md);
6f60c222
AW
2386 mmc_fixup_device(card, blk_fixups);
2387
371a689f
AW
2388 if (mmc_add_disk(md))
2389 goto out;
2390
2391 list_for_each_entry(part_md, &md->part, part) {
2392 if (mmc_add_disk(part_md))
2393 goto out;
2394 }
1da177e4
LT
2395 return 0;
2396
2397 out:
371a689f
AW
2398 mmc_blk_remove_parts(card, md);
2399 mmc_blk_remove_req(md);
5865f287 2400 return 0;
1da177e4
LT
2401}
2402
2403static void mmc_blk_remove(struct mmc_card *card)
2404{
2405 struct mmc_blk_data *md = mmc_get_drvdata(card);
2406
371a689f 2407 mmc_blk_remove_parts(card, md);
ddd6fa7e
AH
2408 mmc_claim_host(card->host);
2409 mmc_blk_part_switch(card, md);
2410 mmc_release_host(card->host);
371a689f 2411 mmc_blk_remove_req(md);
1da177e4
LT
2412 mmc_set_drvdata(card, NULL);
2413}
2414
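/*
 * Power management: on suspend the request queues for the main device
 * and all partitions are stopped; on resume they are restarted and the
 * current partition is reset to the main area, since the card re-enters
 * its idle state.
 */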
2415#ifdef CONFIG_PM
32d317c6 2416static int mmc_blk_suspend(struct mmc_card *card)
1da177e4 2417{
371a689f 2418 struct mmc_blk_data *part_md;
1da177e4
LT
2419 struct mmc_blk_data *md = mmc_get_drvdata(card);
2420
2421 if (md) {
2422 mmc_queue_suspend(&md->queue);
371a689f
AW
2423 list_for_each_entry(part_md, &md->part, part) {
2424 mmc_queue_suspend(&part_md->queue);
2425 }
1da177e4
LT
2426 }
2427 return 0;
2428}
2429
2430static int mmc_blk_resume(struct mmc_card *card)
2431{
371a689f 2432 struct mmc_blk_data *part_md;
1da177e4
LT
2433 struct mmc_blk_data *md = mmc_get_drvdata(card);
2434
2435 if (md) {
371a689f
AW
2436 /*
2437 * Resume involves the card going into idle state,
 2438 * so the current partition is always the main one.
2439 */
2440 md->part_curr = md->part_type;
1da177e4 2441 mmc_queue_resume(&md->queue);
371a689f
AW
2442 list_for_each_entry(part_md, &md->part, part) {
2443 mmc_queue_resume(&part_md->queue);
2444 }
1da177e4
LT
2445 }
2446 return 0;
2447}
2448#else
2449#define mmc_blk_suspend NULL
2450#define mmc_blk_resume NULL
2451#endif
2452
2453static struct mmc_driver mmc_driver = {
2454 .drv = {
2455 .name = "mmcblk",
2456 },
2457 .probe = mmc_blk_probe,
2458 .remove = mmc_blk_remove,
2459 .suspend = mmc_blk_suspend,
2460 .resume = mmc_blk_resume,
2461};
2462
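/*
 * Module init: with perdev_minors minors reserved per device, at most
 * 256 / perdev_minors block devices can exist (e.g. 8 minors per device
 * allows 32 devices). Register the block major first, then the mmc bus
 * driver.
 */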
2463static int __init mmc_blk_init(void)
2464{
9d4e98e9 2465 int res;
1da177e4 2466
5e71b7a6
OJ
2467 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
2468 pr_info("mmcblk: using %d minors per device\n", perdev_minors);
2469
2470 max_devices = 256 / perdev_minors;
2471
fe6b4c88
PO
2472 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
2473 if (res)
1da177e4 2474 goto out;
1da177e4 2475
9d4e98e9
AM
2476 res = mmc_register_driver(&mmc_driver);
2477 if (res)
2478 goto out2;
1da177e4 2479
9d4e98e9
AM
2480 return 0;
2481 out2:
2482 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1da177e4
LT
2483 out:
2484 return res;
2485}
2486
2487static void __exit mmc_blk_exit(void)
2488{
2489 mmc_unregister_driver(&mmc_driver);
fe6b4c88 2490 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1da177e4
LT
2491}
2492
2493module_init(mmc_blk_init);
2494module_exit(mmc_blk_exit);
2495
2496MODULE_LICENSE("GPL");
2497MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
2498