Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Block driver for media (i.e., flash cards) | |
3 | * | |
4 | * Copyright 2002 Hewlett-Packard Company | |
979ce720 | 5 | * Copyright 2005-2008 Pierre Ossman |
1da177e4 LT |
6 | * |
7 | * Use consistent with the GNU GPL is permitted, | |
8 | * provided that this copyright notice is | |
9 | * preserved in its entirety in all copies and derived works. | |
10 | * | |
11 | * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, | |
12 | * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS | |
13 | * FITNESS FOR ANY PARTICULAR PURPOSE. | |
14 | * | |
15 | * Many thanks to Alessandro Rubini and Jonathan Corbet! | |
16 | * | |
17 | * Author: Andrew Christian | |
18 | * 28 May 2002 | |
19 | */ | |
20 | #include <linux/moduleparam.h> | |
21 | #include <linux/module.h> | |
22 | #include <linux/init.h> | |
23 | ||
1da177e4 LT |
24 | #include <linux/kernel.h> |
25 | #include <linux/fs.h> | |
5a0e3ad6 | 26 | #include <linux/slab.h> |
1da177e4 LT |
27 | #include <linux/errno.h> |
28 | #include <linux/hdreg.h> | |
29 | #include <linux/kdev_t.h> | |
30 | #include <linux/blkdev.h> | |
a621aaed | 31 | #include <linux/mutex.h> |
ec5a19dd | 32 | #include <linux/scatterlist.h> |
a7bbb573 | 33 | #include <linux/string_helpers.h> |
cb87ea28 JC |
34 | #include <linux/delay.h> |
35 | #include <linux/capability.h> | |
36 | #include <linux/compat.h> | |
1da177e4 | 37 | |
cb87ea28 | 38 | #include <linux/mmc/ioctl.h> |
1da177e4 | 39 | #include <linux/mmc/card.h> |
385e3227 | 40 | #include <linux/mmc/host.h> |
da7fbe58 PO |
41 | #include <linux/mmc/mmc.h> |
42 | #include <linux/mmc/sd.h> | |
1da177e4 LT |
43 | |
44 | #include <asm/system.h> | |
45 | #include <asm/uaccess.h> | |
46 | ||
98ac2162 | 47 | #include "queue.h" |
1da177e4 | 48 | |
6b0b6285 | 49 | MODULE_ALIAS("mmc:block"); |
5e71b7a6 OJ |
50 | #ifdef MODULE_PARAM_PREFIX |
51 | #undef MODULE_PARAM_PREFIX | |
52 | #endif | |
53 | #define MODULE_PARAM_PREFIX "mmcblk." | |
54 | ||
6a7a6b45 AW |
55 | #define INAND_CMD38_ARG_EXT_CSD 113 |
56 | #define INAND_CMD38_ARG_ERASE 0x00 | |
57 | #define INAND_CMD38_ARG_TRIM 0x01 | |
58 | #define INAND_CMD38_ARG_SECERASE 0x80 | |
59 | #define INAND_CMD38_ARG_SECTRIM1 0x81 | |
60 | #define INAND_CMD38_ARG_SECTRIM2 0x88 | |
61 | ||
5e71b7a6 | 62 | static DEFINE_MUTEX(block_mutex); |
6b0b6285 | 63 | |
1da177e4 | 64 | /* |
5e71b7a6 OJ |
65 | * The defaults come from config options but can be overriden by module |
66 | * or bootarg options. | |
1da177e4 | 67 | */ |
5e71b7a6 | 68 | static int perdev_minors = CONFIG_MMC_BLOCK_MINORS; |
1dff3144 | 69 | |
5e71b7a6 OJ |
70 | /* |
71 | * We've only got one major, so number of mmcblk devices is | |
72 | * limited to 256 / number of minors per device. | |
73 | */ | |
74 | static int max_devices; | |
75 | ||
76 | /* 256 minors, so at most 256 separate devices */ | |
77 | static DECLARE_BITMAP(dev_use, 256); | |
f06c9153 | 78 | static DECLARE_BITMAP(name_use, 256); |
1da177e4 | 79 | |
1da177e4 LT |
80 | /* |
81 | * There is one mmc_blk_data per slot. | |
82 | */ | |
83 | struct mmc_blk_data { | |
84 | spinlock_t lock; | |
85 | struct gendisk *disk; | |
86 | struct mmc_queue queue; | |
371a689f | 87 | struct list_head part; |
1da177e4 | 88 | |
d0c97cfb AW |
89 | unsigned int flags; |
90 | #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ | |
91 | #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ | |
92 | ||
1da177e4 | 93 | unsigned int usage; |
a6f6c96b | 94 | unsigned int read_only; |
371a689f | 95 | unsigned int part_type; |
f06c9153 | 96 | unsigned int name_idx; |
67716327 AH |
97 | unsigned int reset_done; |
98 | #define MMC_BLK_READ BIT(0) | |
99 | #define MMC_BLK_WRITE BIT(1) | |
100 | #define MMC_BLK_DISCARD BIT(2) | |
101 | #define MMC_BLK_SECDISCARD BIT(3) | |
371a689f AW |
102 | |
103 | /* | |
104 | * Only set in main mmc_blk_data associated | |
105 | * with mmc_card with mmc_set_drvdata, and keeps | |
106 | * track of the current selected device partition. | |
107 | */ | |
108 | unsigned int part_curr; | |
109 | struct device_attribute force_ro; | |
add710ea JR |
110 | struct device_attribute power_ro_lock; |
111 | int area_type; | |
1da177e4 LT |
112 | }; |
113 | ||
a621aaed | 114 | static DEFINE_MUTEX(open_lock); |
1da177e4 | 115 | |
d78d4a8a PF |
116 | enum mmc_blk_status { |
117 | MMC_BLK_SUCCESS = 0, | |
118 | MMC_BLK_PARTIAL, | |
d78d4a8a | 119 | MMC_BLK_CMD_ERR, |
67716327 | 120 | MMC_BLK_RETRY, |
d78d4a8a | 121 | MMC_BLK_ABORT, |
67716327 AH |
122 | MMC_BLK_DATA_ERR, |
123 | MMC_BLK_ECC_ERR, | |
a8ad82cc | 124 | MMC_BLK_NOMEDIUM, |
d78d4a8a PF |
125 | }; |
126 | ||
5e71b7a6 OJ |
127 | module_param(perdev_minors, int, 0444); |
128 | MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); | |
129 | ||
1da177e4 LT |
130 | static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) |
131 | { | |
132 | struct mmc_blk_data *md; | |
133 | ||
a621aaed | 134 | mutex_lock(&open_lock); |
1da177e4 LT |
135 | md = disk->private_data; |
136 | if (md && md->usage == 0) | |
137 | md = NULL; | |
138 | if (md) | |
139 | md->usage++; | |
a621aaed | 140 | mutex_unlock(&open_lock); |
1da177e4 LT |
141 | |
142 | return md; | |
143 | } | |
144 | ||
371a689f AW |
145 | static inline int mmc_get_devidx(struct gendisk *disk) |
146 | { | |
147 | int devmaj = MAJOR(disk_devt(disk)); | |
148 | int devidx = MINOR(disk_devt(disk)) / perdev_minors; | |
149 | ||
150 | if (!devmaj) | |
151 | devidx = disk->first_minor / perdev_minors; | |
152 | return devidx; | |
153 | } | |
154 | ||
1da177e4 LT |
155 | static void mmc_blk_put(struct mmc_blk_data *md) |
156 | { | |
a621aaed | 157 | mutex_lock(&open_lock); |
1da177e4 LT |
158 | md->usage--; |
159 | if (md->usage == 0) { | |
371a689f | 160 | int devidx = mmc_get_devidx(md->disk); |
5fa83ce2 AH |
161 | blk_cleanup_queue(md->queue.queue); |
162 | ||
1dff3144 DW |
163 | __clear_bit(devidx, dev_use); |
164 | ||
1da177e4 | 165 | put_disk(md->disk); |
1da177e4 LT |
166 | kfree(md); |
167 | } | |
a621aaed | 168 | mutex_unlock(&open_lock); |
1da177e4 LT |
169 | } |
170 | ||
add710ea JR |
171 | static ssize_t power_ro_lock_show(struct device *dev, |
172 | struct device_attribute *attr, char *buf) | |
173 | { | |
174 | int ret; | |
175 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | |
176 | struct mmc_card *card = md->queue.card; | |
177 | int locked = 0; | |
178 | ||
179 | if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN) | |
180 | locked = 2; | |
181 | else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) | |
182 | locked = 1; | |
183 | ||
184 | ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); | |
185 | ||
186 | return ret; | |
187 | } | |
188 | ||
189 | static ssize_t power_ro_lock_store(struct device *dev, | |
190 | struct device_attribute *attr, const char *buf, size_t count) | |
191 | { | |
192 | int ret; | |
193 | struct mmc_blk_data *md, *part_md; | |
194 | struct mmc_card *card; | |
195 | unsigned long set; | |
196 | ||
197 | if (kstrtoul(buf, 0, &set)) | |
198 | return -EINVAL; | |
199 | ||
200 | if (set != 1) | |
201 | return count; | |
202 | ||
203 | md = mmc_blk_get(dev_to_disk(dev)); | |
204 | card = md->queue.card; | |
205 | ||
206 | mmc_claim_host(card->host); | |
207 | ||
208 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, | |
209 | card->ext_csd.boot_ro_lock | | |
210 | EXT_CSD_BOOT_WP_B_PWR_WP_EN, | |
211 | card->ext_csd.part_time); | |
212 | if (ret) | |
213 | pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret); | |
214 | else | |
215 | card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN; | |
216 | ||
217 | mmc_release_host(card->host); | |
218 | ||
219 | if (!ret) { | |
220 | pr_info("%s: Locking boot partition ro until next power on\n", | |
221 | md->disk->disk_name); | |
222 | set_disk_ro(md->disk, 1); | |
223 | ||
224 | list_for_each_entry(part_md, &md->part, part) | |
225 | if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) { | |
226 | pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name); | |
227 | set_disk_ro(part_md->disk, 1); | |
228 | } | |
229 | } | |
230 | ||
231 | mmc_blk_put(md); | |
232 | return count; | |
233 | } | |
234 | ||
371a689f AW |
235 | static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, |
236 | char *buf) | |
237 | { | |
238 | int ret; | |
239 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | |
240 | ||
241 | ret = snprintf(buf, PAGE_SIZE, "%d", | |
242 | get_disk_ro(dev_to_disk(dev)) ^ | |
243 | md->read_only); | |
244 | mmc_blk_put(md); | |
245 | return ret; | |
246 | } | |
247 | ||
248 | static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, | |
249 | const char *buf, size_t count) | |
250 | { | |
251 | int ret; | |
252 | char *end; | |
253 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); | |
254 | unsigned long set = simple_strtoul(buf, &end, 0); | |
255 | if (end == buf) { | |
256 | ret = -EINVAL; | |
257 | goto out; | |
258 | } | |
259 | ||
260 | set_disk_ro(dev_to_disk(dev), set || md->read_only); | |
261 | ret = count; | |
262 | out: | |
263 | mmc_blk_put(md); | |
264 | return ret; | |
265 | } | |
266 | ||
a5a1561f | 267 | static int mmc_blk_open(struct block_device *bdev, fmode_t mode) |
1da177e4 | 268 | { |
a5a1561f | 269 | struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); |
1da177e4 LT |
270 | int ret = -ENXIO; |
271 | ||
2a48fc0a | 272 | mutex_lock(&block_mutex); |
1da177e4 LT |
273 | if (md) { |
274 | if (md->usage == 2) | |
a5a1561f | 275 | check_disk_change(bdev); |
1da177e4 | 276 | ret = 0; |
a00fc090 | 277 | |
a5a1561f | 278 | if ((mode & FMODE_WRITE) && md->read_only) { |
70bb0896 | 279 | mmc_blk_put(md); |
a00fc090 | 280 | ret = -EROFS; |
70bb0896 | 281 | } |
1da177e4 | 282 | } |
2a48fc0a | 283 | mutex_unlock(&block_mutex); |
1da177e4 LT |
284 | |
285 | return ret; | |
286 | } | |
287 | ||
a5a1561f | 288 | static int mmc_blk_release(struct gendisk *disk, fmode_t mode) |
1da177e4 | 289 | { |
a5a1561f | 290 | struct mmc_blk_data *md = disk->private_data; |
1da177e4 | 291 | |
2a48fc0a | 292 | mutex_lock(&block_mutex); |
1da177e4 | 293 | mmc_blk_put(md); |
2a48fc0a | 294 | mutex_unlock(&block_mutex); |
1da177e4 LT |
295 | return 0; |
296 | } | |
297 | ||
298 | static int | |
a885c8c4 | 299 | mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
1da177e4 | 300 | { |
a885c8c4 CH |
301 | geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); |
302 | geo->heads = 4; | |
303 | geo->sectors = 16; | |
304 | return 0; | |
1da177e4 LT |
305 | } |
306 | ||
cb87ea28 JC |
307 | struct mmc_blk_ioc_data { |
308 | struct mmc_ioc_cmd ic; | |
309 | unsigned char *buf; | |
310 | u64 buf_bytes; | |
311 | }; | |
312 | ||
313 | static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( | |
314 | struct mmc_ioc_cmd __user *user) | |
315 | { | |
316 | struct mmc_blk_ioc_data *idata; | |
317 | int err; | |
318 | ||
319 | idata = kzalloc(sizeof(*idata), GFP_KERNEL); | |
320 | if (!idata) { | |
321 | err = -ENOMEM; | |
aea253ec | 322 | goto out; |
cb87ea28 JC |
323 | } |
324 | ||
325 | if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) { | |
326 | err = -EFAULT; | |
aea253ec | 327 | goto idata_err; |
cb87ea28 JC |
328 | } |
329 | ||
330 | idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; | |
331 | if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { | |
332 | err = -EOVERFLOW; | |
aea253ec | 333 | goto idata_err; |
cb87ea28 JC |
334 | } |
335 | ||
4d6144de JR |
336 | if (!idata->buf_bytes) |
337 | return idata; | |
338 | ||
cb87ea28 JC |
339 | idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); |
340 | if (!idata->buf) { | |
341 | err = -ENOMEM; | |
aea253ec | 342 | goto idata_err; |
cb87ea28 JC |
343 | } |
344 | ||
345 | if (copy_from_user(idata->buf, (void __user *)(unsigned long) | |
346 | idata->ic.data_ptr, idata->buf_bytes)) { | |
347 | err = -EFAULT; | |
348 | goto copy_err; | |
349 | } | |
350 | ||
351 | return idata; | |
352 | ||
353 | copy_err: | |
354 | kfree(idata->buf); | |
aea253ec | 355 | idata_err: |
cb87ea28 | 356 | kfree(idata); |
aea253ec | 357 | out: |
cb87ea28 | 358 | return ERR_PTR(err); |
cb87ea28 JC |
359 | } |
360 | ||
361 | static int mmc_blk_ioctl_cmd(struct block_device *bdev, | |
362 | struct mmc_ioc_cmd __user *ic_ptr) | |
363 | { | |
364 | struct mmc_blk_ioc_data *idata; | |
365 | struct mmc_blk_data *md; | |
366 | struct mmc_card *card; | |
367 | struct mmc_command cmd = {0}; | |
368 | struct mmc_data data = {0}; | |
ad5fd972 | 369 | struct mmc_request mrq = {NULL}; |
cb87ea28 JC |
370 | struct scatterlist sg; |
371 | int err; | |
372 | ||
373 | /* | |
374 | * The caller must have CAP_SYS_RAWIO, and must be calling this on the | |
375 | * whole block device, not on a partition. This prevents overspray | |
376 | * between sibling partitions. | |
377 | */ | |
378 | if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) | |
379 | return -EPERM; | |
380 | ||
381 | idata = mmc_blk_ioctl_copy_from_user(ic_ptr); | |
382 | if (IS_ERR(idata)) | |
383 | return PTR_ERR(idata); | |
384 | ||
cb87ea28 JC |
385 | md = mmc_blk_get(bdev->bd_disk); |
386 | if (!md) { | |
387 | err = -EINVAL; | |
388 | goto cmd_done; | |
389 | } | |
390 | ||
391 | card = md->queue.card; | |
392 | if (IS_ERR(card)) { | |
393 | err = PTR_ERR(card); | |
394 | goto cmd_done; | |
395 | } | |
396 | ||
4d6144de JR |
397 | cmd.opcode = idata->ic.opcode; |
398 | cmd.arg = idata->ic.arg; | |
399 | cmd.flags = idata->ic.flags; | |
400 | ||
401 | if (idata->buf_bytes) { | |
402 | data.sg = &sg; | |
403 | data.sg_len = 1; | |
404 | data.blksz = idata->ic.blksz; | |
405 | data.blocks = idata->ic.blocks; | |
406 | ||
407 | sg_init_one(data.sg, idata->buf, idata->buf_bytes); | |
408 | ||
409 | if (idata->ic.write_flag) | |
410 | data.flags = MMC_DATA_WRITE; | |
411 | else | |
412 | data.flags = MMC_DATA_READ; | |
413 | ||
414 | /* data.flags must already be set before doing this. */ | |
415 | mmc_set_data_timeout(&data, card); | |
416 | ||
417 | /* Allow overriding the timeout_ns for empirical tuning. */ | |
418 | if (idata->ic.data_timeout_ns) | |
419 | data.timeout_ns = idata->ic.data_timeout_ns; | |
420 | ||
421 | if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { | |
422 | /* | |
423 | * Pretend this is a data transfer and rely on the | |
424 | * host driver to compute timeout. When all host | |
425 | * drivers support cmd.cmd_timeout for R1B, this | |
426 | * can be changed to: | |
427 | * | |
428 | * mrq.data = NULL; | |
429 | * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; | |
430 | */ | |
431 | data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; | |
432 | } | |
433 | ||
434 | mrq.data = &data; | |
435 | } | |
436 | ||
437 | mrq.cmd = &cmd; | |
438 | ||
cb87ea28 JC |
439 | mmc_claim_host(card->host); |
440 | ||
441 | if (idata->ic.is_acmd) { | |
442 | err = mmc_app_cmd(card->host, card); | |
443 | if (err) | |
444 | goto cmd_rel_host; | |
445 | } | |
446 | ||
cb87ea28 JC |
447 | mmc_wait_for_req(card->host, &mrq); |
448 | ||
449 | if (cmd.error) { | |
450 | dev_err(mmc_dev(card->host), "%s: cmd error %d\n", | |
451 | __func__, cmd.error); | |
452 | err = cmd.error; | |
453 | goto cmd_rel_host; | |
454 | } | |
455 | if (data.error) { | |
456 | dev_err(mmc_dev(card->host), "%s: data error %d\n", | |
457 | __func__, data.error); | |
458 | err = data.error; | |
459 | goto cmd_rel_host; | |
460 | } | |
461 | ||
462 | /* | |
463 | * According to the SD specs, some commands require a delay after | |
464 | * issuing the command. | |
465 | */ | |
466 | if (idata->ic.postsleep_min_us) | |
467 | usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); | |
468 | ||
469 | if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) { | |
470 | err = -EFAULT; | |
471 | goto cmd_rel_host; | |
472 | } | |
473 | ||
474 | if (!idata->ic.write_flag) { | |
475 | if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr, | |
476 | idata->buf, idata->buf_bytes)) { | |
477 | err = -EFAULT; | |
478 | goto cmd_rel_host; | |
479 | } | |
480 | } | |
481 | ||
482 | cmd_rel_host: | |
483 | mmc_release_host(card->host); | |
484 | ||
485 | cmd_done: | |
486 | mmc_blk_put(md); | |
487 | kfree(idata->buf); | |
488 | kfree(idata); | |
489 | return err; | |
490 | } | |
491 | ||
492 | static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, | |
493 | unsigned int cmd, unsigned long arg) | |
494 | { | |
495 | int ret = -EINVAL; | |
496 | if (cmd == MMC_IOC_CMD) | |
497 | ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg); | |
498 | return ret; | |
499 | } | |
500 | ||
501 | #ifdef CONFIG_COMPAT | |
502 | static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, | |
503 | unsigned int cmd, unsigned long arg) | |
504 | { | |
505 | return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); | |
506 | } | |
507 | #endif | |
508 | ||
83d5cde4 | 509 | static const struct block_device_operations mmc_bdops = { |
a5a1561f AV |
510 | .open = mmc_blk_open, |
511 | .release = mmc_blk_release, | |
a885c8c4 | 512 | .getgeo = mmc_blk_getgeo, |
1da177e4 | 513 | .owner = THIS_MODULE, |
cb87ea28 JC |
514 | .ioctl = mmc_blk_ioctl, |
515 | #ifdef CONFIG_COMPAT | |
516 | .compat_ioctl = mmc_blk_compat_ioctl, | |
517 | #endif | |
1da177e4 LT |
518 | }; |
519 | ||
371a689f AW |
520 | static inline int mmc_blk_part_switch(struct mmc_card *card, |
521 | struct mmc_blk_data *md) | |
522 | { | |
523 | int ret; | |
524 | struct mmc_blk_data *main_md = mmc_get_drvdata(card); | |
0d7d85ca | 525 | |
371a689f AW |
526 | if (main_md->part_curr == md->part_type) |
527 | return 0; | |
528 | ||
529 | if (mmc_card_mmc(card)) { | |
0d7d85ca AH |
530 | u8 part_config = card->ext_csd.part_config; |
531 | ||
532 | part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; | |
533 | part_config |= md->part_type; | |
371a689f AW |
534 | |
535 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
0d7d85ca | 536 | EXT_CSD_PART_CONFIG, part_config, |
371a689f AW |
537 | card->ext_csd.part_time); |
538 | if (ret) | |
539 | return ret; | |
0d7d85ca AH |
540 | |
541 | card->ext_csd.part_config = part_config; | |
67716327 | 542 | } |
371a689f AW |
543 | |
544 | main_md->part_curr = md->part_type; | |
545 | return 0; | |
546 | } | |
547 | ||
ec5a19dd PO |
548 | static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) |
549 | { | |
550 | int err; | |
051913da BD |
551 | u32 result; |
552 | __be32 *blocks; | |
ec5a19dd | 553 | |
ad5fd972 | 554 | struct mmc_request mrq = {NULL}; |
1278dba1 | 555 | struct mmc_command cmd = {0}; |
a61ad2b4 | 556 | struct mmc_data data = {0}; |
ec5a19dd PO |
557 | unsigned int timeout_us; |
558 | ||
559 | struct scatterlist sg; | |
560 | ||
ec5a19dd PO |
561 | cmd.opcode = MMC_APP_CMD; |
562 | cmd.arg = card->rca << 16; | |
7213d175 | 563 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; |
ec5a19dd PO |
564 | |
565 | err = mmc_wait_for_cmd(card->host, &cmd, 0); | |
7213d175 DB |
566 | if (err) |
567 | return (u32)-1; | |
568 | if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD)) | |
ec5a19dd PO |
569 | return (u32)-1; |
570 | ||
571 | memset(&cmd, 0, sizeof(struct mmc_command)); | |
572 | ||
573 | cmd.opcode = SD_APP_SEND_NUM_WR_BLKS; | |
574 | cmd.arg = 0; | |
7213d175 | 575 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; |
ec5a19dd | 576 | |
ec5a19dd PO |
577 | data.timeout_ns = card->csd.tacc_ns * 100; |
578 | data.timeout_clks = card->csd.tacc_clks * 100; | |
579 | ||
580 | timeout_us = data.timeout_ns / 1000; | |
581 | timeout_us += data.timeout_clks * 1000 / | |
582 | (card->host->ios.clock / 1000); | |
583 | ||
584 | if (timeout_us > 100000) { | |
585 | data.timeout_ns = 100000000; | |
586 | data.timeout_clks = 0; | |
587 | } | |
588 | ||
589 | data.blksz = 4; | |
590 | data.blocks = 1; | |
591 | data.flags = MMC_DATA_READ; | |
592 | data.sg = &sg; | |
593 | data.sg_len = 1; | |
594 | ||
ec5a19dd PO |
595 | mrq.cmd = &cmd; |
596 | mrq.data = &data; | |
597 | ||
051913da BD |
598 | blocks = kmalloc(4, GFP_KERNEL); |
599 | if (!blocks) | |
600 | return (u32)-1; | |
601 | ||
602 | sg_init_one(&sg, blocks, 4); | |
ec5a19dd PO |
603 | |
604 | mmc_wait_for_req(card->host, &mrq); | |
605 | ||
051913da BD |
606 | result = ntohl(*blocks); |
607 | kfree(blocks); | |
608 | ||
17b0429d | 609 | if (cmd.error || data.error) |
051913da | 610 | result = (u32)-1; |
ec5a19dd | 611 | |
051913da | 612 | return result; |
ec5a19dd PO |
613 | } |
614 | ||
a01f3ccf RKAL |
615 | static int send_stop(struct mmc_card *card, u32 *status) |
616 | { | |
617 | struct mmc_command cmd = {0}; | |
618 | int err; | |
619 | ||
620 | cmd.opcode = MMC_STOP_TRANSMISSION; | |
621 | cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; | |
622 | err = mmc_wait_for_cmd(card->host, &cmd, 5); | |
623 | if (err == 0) | |
624 | *status = cmd.resp[0]; | |
625 | return err; | |
626 | } | |
627 | ||
0a2d4048 | 628 | static int get_card_status(struct mmc_card *card, u32 *status, int retries) |
504f191f | 629 | { |
1278dba1 | 630 | struct mmc_command cmd = {0}; |
504f191f AH |
631 | int err; |
632 | ||
504f191f AH |
633 | cmd.opcode = MMC_SEND_STATUS; |
634 | if (!mmc_host_is_spi(card->host)) | |
635 | cmd.arg = card->rca << 16; | |
636 | cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; | |
0a2d4048 RKAL |
637 | err = mmc_wait_for_cmd(card->host, &cmd, retries); |
638 | if (err == 0) | |
639 | *status = cmd.resp[0]; | |
640 | return err; | |
504f191f AH |
641 | } |
642 | ||
a8ad82cc | 643 | #define ERR_NOMEDIUM 3 |
a01f3ccf RKAL |
644 | #define ERR_RETRY 2 |
645 | #define ERR_ABORT 1 | |
646 | #define ERR_CONTINUE 0 | |
647 | ||
648 | static int mmc_blk_cmd_error(struct request *req, const char *name, int error, | |
649 | bool status_valid, u32 status) | |
650 | { | |
651 | switch (error) { | |
652 | case -EILSEQ: | |
653 | /* response crc error, retry the r/w cmd */ | |
654 | pr_err("%s: %s sending %s command, card status %#x\n", | |
655 | req->rq_disk->disk_name, "response CRC error", | |
656 | name, status); | |
657 | return ERR_RETRY; | |
658 | ||
659 | case -ETIMEDOUT: | |
660 | pr_err("%s: %s sending %s command, card status %#x\n", | |
661 | req->rq_disk->disk_name, "timed out", name, status); | |
662 | ||
663 | /* If the status cmd initially failed, retry the r/w cmd */ | |
664 | if (!status_valid) | |
665 | return ERR_RETRY; | |
666 | ||
667 | /* | |
668 | * If it was a r/w cmd crc error, or illegal command | |
669 | * (eg, issued in wrong state) then retry - we should | |
670 | * have corrected the state problem above. | |
671 | */ | |
672 | if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) | |
673 | return ERR_RETRY; | |
674 | ||
675 | /* Otherwise abort the command */ | |
676 | return ERR_ABORT; | |
677 | ||
678 | default: | |
679 | /* We don't understand the error code the driver gave us */ | |
680 | pr_err("%s: unknown error %d sending read/write command, card status %#x\n", | |
681 | req->rq_disk->disk_name, error, status); | |
682 | return ERR_ABORT; | |
683 | } | |
684 | } | |
685 | ||
686 | /* | |
687 | * Initial r/w and stop cmd error recovery. | |
688 | * We don't know whether the card received the r/w cmd or not, so try to | |
689 | * restore things back to a sane state. Essentially, we do this as follows: | |
690 | * - Obtain card status. If the first attempt to obtain card status fails, | |
691 | * the status word will reflect the failed status cmd, not the failed | |
692 | * r/w cmd. If we fail to obtain card status, it suggests we can no | |
693 | * longer communicate with the card. | |
694 | * - Check the card state. If the card received the cmd but there was a | |
695 | * transient problem with the response, it might still be in a data transfer | |
696 | * mode. Try to send it a stop command. If this fails, we can't recover. | |
697 | * - If the r/w cmd failed due to a response CRC error, it was probably | |
698 | * transient, so retry the cmd. | |
699 | * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry. | |
700 | * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or | |
701 | * illegal cmd, retry. | |
702 | * Otherwise we don't understand what happened, so abort. | |
703 | */ | |
704 | static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, | |
67716327 | 705 | struct mmc_blk_request *brq, int *ecc_err) |
a01f3ccf RKAL |
706 | { |
707 | bool prev_cmd_status_valid = true; | |
708 | u32 status, stop_status = 0; | |
709 | int err, retry; | |
710 | ||
a8ad82cc SRT |
711 | if (mmc_card_removed(card)) |
712 | return ERR_NOMEDIUM; | |
713 | ||
a01f3ccf RKAL |
714 | /* |
715 | * Try to get card status which indicates both the card state | |
716 | * and why there was no response. If the first attempt fails, | |
717 | * we can't be sure the returned status is for the r/w command. | |
718 | */ | |
719 | for (retry = 2; retry >= 0; retry--) { | |
720 | err = get_card_status(card, &status, 0); | |
721 | if (!err) | |
722 | break; | |
723 | ||
724 | prev_cmd_status_valid = false; | |
725 | pr_err("%s: error %d sending status command, %sing\n", | |
726 | req->rq_disk->disk_name, err, retry ? "retry" : "abort"); | |
727 | } | |
728 | ||
729 | /* We couldn't get a response from the card. Give up. */ | |
a8ad82cc SRT |
730 | if (err) { |
731 | /* Check if the card is removed */ | |
732 | if (mmc_detect_card_removed(card->host)) | |
733 | return ERR_NOMEDIUM; | |
a01f3ccf | 734 | return ERR_ABORT; |
a8ad82cc | 735 | } |
a01f3ccf | 736 | |
67716327 AH |
737 | /* Flag ECC errors */ |
738 | if ((status & R1_CARD_ECC_FAILED) || | |
739 | (brq->stop.resp[0] & R1_CARD_ECC_FAILED) || | |
740 | (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) | |
741 | *ecc_err = 1; | |
742 | ||
a01f3ccf RKAL |
743 | /* |
744 | * Check the current card state. If it is in some data transfer | |
745 | * mode, tell it to stop (and hopefully transition back to TRAN.) | |
746 | */ | |
747 | if (R1_CURRENT_STATE(status) == R1_STATE_DATA || | |
748 | R1_CURRENT_STATE(status) == R1_STATE_RCV) { | |
749 | err = send_stop(card, &stop_status); | |
750 | if (err) | |
751 | pr_err("%s: error %d sending stop command\n", | |
752 | req->rq_disk->disk_name, err); | |
753 | ||
754 | /* | |
755 | * If the stop cmd also timed out, the card is probably | |
756 | * not present, so abort. Other errors are bad news too. | |
757 | */ | |
758 | if (err) | |
759 | return ERR_ABORT; | |
67716327 AH |
760 | if (stop_status & R1_CARD_ECC_FAILED) |
761 | *ecc_err = 1; | |
a01f3ccf RKAL |
762 | } |
763 | ||
764 | /* Check for set block count errors */ | |
765 | if (brq->sbc.error) | |
766 | return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error, | |
767 | prev_cmd_status_valid, status); | |
768 | ||
769 | /* Check for r/w command errors */ | |
770 | if (brq->cmd.error) | |
771 | return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, | |
772 | prev_cmd_status_valid, status); | |
773 | ||
67716327 AH |
774 | /* Data errors */ |
775 | if (!brq->stop.error) | |
776 | return ERR_CONTINUE; | |
777 | ||
a01f3ccf RKAL |
778 | /* Now for stop errors. These aren't fatal to the transfer. */ |
779 | pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", | |
780 | req->rq_disk->disk_name, brq->stop.error, | |
781 | brq->cmd.resp[0], status); | |
782 | ||
783 | /* | |
784 | * Subsitute in our own stop status as this will give the error | |
785 | * state which happened during the execution of the r/w command. | |
786 | */ | |
787 | if (stop_status) { | |
788 | brq->stop.resp[0] = stop_status; | |
789 | brq->stop.error = 0; | |
790 | } | |
791 | return ERR_CONTINUE; | |
792 | } | |
793 | ||
67716327 AH |
794 | static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, |
795 | int type) | |
796 | { | |
797 | int err; | |
798 | ||
799 | if (md->reset_done & type) | |
800 | return -EEXIST; | |
801 | ||
802 | md->reset_done |= type; | |
803 | err = mmc_hw_reset(host); | |
804 | /* Ensure we switch back to the correct partition */ | |
805 | if (err != -EOPNOTSUPP) { | |
806 | struct mmc_blk_data *main_md = mmc_get_drvdata(host->card); | |
807 | int part_err; | |
808 | ||
809 | main_md->part_curr = main_md->part_type; | |
810 | part_err = mmc_blk_part_switch(host->card, md); | |
811 | if (part_err) { | |
812 | /* | |
813 | * We have failed to get back into the correct | |
814 | * partition, so we need to abort the whole request. | |
815 | */ | |
816 | return -ENODEV; | |
817 | } | |
818 | } | |
819 | return err; | |
820 | } | |
821 | ||
822 | static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) | |
823 | { | |
824 | md->reset_done &= ~type; | |
825 | } | |
826 | ||
bd788c96 AH |
827 | static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) |
828 | { | |
829 | struct mmc_blk_data *md = mq->data; | |
830 | struct mmc_card *card = md->queue.card; | |
831 | unsigned int from, nr, arg; | |
67716327 | 832 | int err = 0, type = MMC_BLK_DISCARD; |
bd788c96 | 833 | |
bd788c96 AH |
834 | if (!mmc_can_erase(card)) { |
835 | err = -EOPNOTSUPP; | |
836 | goto out; | |
837 | } | |
838 | ||
839 | from = blk_rq_pos(req); | |
840 | nr = blk_rq_sectors(req); | |
841 | ||
b3bf9153 KP |
842 | if (mmc_can_discard(card)) |
843 | arg = MMC_DISCARD_ARG; | |
844 | else if (mmc_can_trim(card)) | |
bd788c96 AH |
845 | arg = MMC_TRIM_ARG; |
846 | else | |
847 | arg = MMC_ERASE_ARG; | |
67716327 | 848 | retry: |
6a7a6b45 AW |
849 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
850 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
851 | INAND_CMD38_ARG_EXT_CSD, | |
852 | arg == MMC_TRIM_ARG ? | |
853 | INAND_CMD38_ARG_TRIM : | |
854 | INAND_CMD38_ARG_ERASE, | |
855 | 0); | |
856 | if (err) | |
857 | goto out; | |
858 | } | |
bd788c96 AH |
859 | err = mmc_erase(card, from, nr, arg); |
860 | out: | |
67716327 AH |
861 | if (err == -EIO && !mmc_blk_reset(md, card->host, type)) |
862 | goto retry; | |
863 | if (!err) | |
864 | mmc_blk_reset_success(md, type); | |
bd788c96 AH |
865 | spin_lock_irq(&md->lock); |
866 | __blk_end_request(req, err, blk_rq_bytes(req)); | |
867 | spin_unlock_irq(&md->lock); | |
868 | ||
bd788c96 AH |
869 | return err ? 0 : 1; |
870 | } | |
871 | ||
49804548 AH |
872 | static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, |
873 | struct request *req) | |
874 | { | |
875 | struct mmc_blk_data *md = mq->data; | |
876 | struct mmc_card *card = md->queue.card; | |
877 | unsigned int from, nr, arg; | |
67716327 | 878 | int err = 0, type = MMC_BLK_SECDISCARD; |
49804548 | 879 | |
d9ddd629 | 880 | if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) { |
49804548 AH |
881 | err = -EOPNOTSUPP; |
882 | goto out; | |
883 | } | |
884 | ||
d9ddd629 KP |
885 | /* The sanitize operation is supported at v4.5 only */ |
886 | if (mmc_can_sanitize(card)) { | |
887 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
888 | EXT_CSD_SANITIZE_START, 1, 0); | |
889 | goto out; | |
890 | } | |
891 | ||
49804548 AH |
892 | from = blk_rq_pos(req); |
893 | nr = blk_rq_sectors(req); | |
894 | ||
895 | if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) | |
896 | arg = MMC_SECURE_TRIM1_ARG; | |
897 | else | |
898 | arg = MMC_SECURE_ERASE_ARG; | |
67716327 | 899 | retry: |
6a7a6b45 AW |
900 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
901 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
902 | INAND_CMD38_ARG_EXT_CSD, | |
903 | arg == MMC_SECURE_TRIM1_ARG ? | |
904 | INAND_CMD38_ARG_SECTRIM1 : | |
905 | INAND_CMD38_ARG_SECERASE, | |
906 | 0); | |
907 | if (err) | |
908 | goto out; | |
909 | } | |
49804548 | 910 | err = mmc_erase(card, from, nr, arg); |
6a7a6b45 AW |
911 | if (!err && arg == MMC_SECURE_TRIM1_ARG) { |
912 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { | |
913 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | |
914 | INAND_CMD38_ARG_EXT_CSD, | |
915 | INAND_CMD38_ARG_SECTRIM2, | |
916 | 0); | |
917 | if (err) | |
918 | goto out; | |
919 | } | |
49804548 | 920 | err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); |
6a7a6b45 | 921 | } |
49804548 | 922 | out: |
67716327 AH |
923 | if (err == -EIO && !mmc_blk_reset(md, card->host, type)) |
924 | goto retry; | |
925 | if (!err) | |
926 | mmc_blk_reset_success(md, type); | |
49804548 AH |
927 | spin_lock_irq(&md->lock); |
928 | __blk_end_request(req, err, blk_rq_bytes(req)); | |
929 | spin_unlock_irq(&md->lock); | |
930 | ||
49804548 AH |
931 | return err ? 0 : 1; |
932 | } | |
933 | ||
f4c5522b AW |
934 | static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) |
935 | { | |
936 | struct mmc_blk_data *md = mq->data; | |
881d1c25 SJ |
937 | struct mmc_card *card = md->queue.card; |
938 | int ret = 0; | |
939 | ||
940 | ret = mmc_flush_cache(card); | |
941 | if (ret) | |
942 | ret = -EIO; | |
f4c5522b | 943 | |
f4c5522b | 944 | spin_lock_irq(&md->lock); |
881d1c25 | 945 | __blk_end_request_all(req, ret); |
f4c5522b AW |
946 | spin_unlock_irq(&md->lock); |
947 | ||
881d1c25 | 948 | return ret ? 0 : 1; |
f4c5522b AW |
949 | } |
950 | ||
951 | /* | |
952 | * Reformat current write as a reliable write, supporting | |
953 | * both legacy and the enhanced reliable write MMC cards. | |
954 | * In each transfer we'll handle only as much as a single | |
955 | * reliable write can handle, thus finish the request in | |
956 | * partial completions. | |
957 | */ | |
d0c97cfb AW |
958 | static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, |
959 | struct mmc_card *card, | |
960 | struct request *req) | |
f4c5522b | 961 | { |
f4c5522b AW |
962 | if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) { |
963 | /* Legacy mode imposes restrictions on transfers. */ | |
964 | if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors)) | |
965 | brq->data.blocks = 1; | |
966 | ||
967 | if (brq->data.blocks > card->ext_csd.rel_sectors) | |
968 | brq->data.blocks = card->ext_csd.rel_sectors; | |
969 | else if (brq->data.blocks < card->ext_csd.rel_sectors) | |
970 | brq->data.blocks = 1; | |
971 | } | |
f4c5522b AW |
972 | } |
973 | ||
4c2b8f26 RKAL |
974 | #define CMD_ERRORS \ |
975 | (R1_OUT_OF_RANGE | /* Command argument out of range */ \ | |
976 | R1_ADDRESS_ERROR | /* Misaligned address */ \ | |
977 | R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\ | |
978 | R1_WP_VIOLATION | /* Tried to write to protected block */ \ | |
979 | R1_CC_ERROR | /* Card controller error */ \ | |
980 | R1_ERROR) /* General/unknown error */ | |
981 | ||
ee8a43a5 PF |
982 | static int mmc_blk_err_check(struct mmc_card *card, |
983 | struct mmc_async_req *areq) | |
d78d4a8a | 984 | { |
ee8a43a5 PF |
985 | struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, |
986 | mmc_active); | |
987 | struct mmc_blk_request *brq = &mq_mrq->brq; | |
988 | struct request *req = mq_mrq->req; | |
67716327 | 989 | int ecc_err = 0; |
d78d4a8a PF |
990 | |
991 | /* | |
992 | * sbc.error indicates a problem with the set block count | |
993 | * command. No data will have been transferred. | |
994 | * | |
995 | * cmd.error indicates a problem with the r/w command. No | |
996 | * data will have been transferred. | |
997 | * | |
998 | * stop.error indicates a problem with the stop command. Data | |
999 | * may have been transferred, or may still be transferring. | |
1000 | */ | |
67716327 AH |
1001 | if (brq->sbc.error || brq->cmd.error || brq->stop.error || |
1002 | brq->data.error) { | |
1003 | switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) { | |
d78d4a8a PF |
1004 | case ERR_RETRY: |
1005 | return MMC_BLK_RETRY; | |
1006 | case ERR_ABORT: | |
1007 | return MMC_BLK_ABORT; | |
a8ad82cc SRT |
1008 | case ERR_NOMEDIUM: |
1009 | return MMC_BLK_NOMEDIUM; | |
d78d4a8a PF |
1010 | case ERR_CONTINUE: |
1011 | break; | |
1012 | } | |
1013 | } | |
1014 | ||
1015 | /* | |
1016 | * Check for errors relating to the execution of the | |
1017 | * initial command - such as address errors. No data | |
1018 | * has been transferred. | |
1019 | */ | |
1020 | if (brq->cmd.resp[0] & CMD_ERRORS) { | |
1021 | pr_err("%s: r/w command failed, status = %#x\n", | |
1022 | req->rq_disk->disk_name, brq->cmd.resp[0]); | |
1023 | return MMC_BLK_ABORT; | |
1024 | } | |
1025 | ||
1026 | /* | |
1027 | * Everything else is either success, or a data error of some | |
1028 | * kind. If it was a write, we may have transitioned to | |
1029 | * program mode, which we have to wait for it to complete. | |
1030 | */ | |
1031 | if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { | |
1032 | u32 status; | |
1033 | do { | |
1034 | int err = get_card_status(card, &status, 5); | |
1035 | if (err) { | |
a3c76eb9 | 1036 | pr_err("%s: error %d requesting status\n", |
d78d4a8a PF |
1037 | req->rq_disk->disk_name, err); |
1038 | return MMC_BLK_CMD_ERR; | |
1039 | } | |
1040 | /* | |
1041 | * Some cards mishandle the status bits, | |
1042 | * so make sure to check both the busy | |
1043 | * indication and the card state. | |
1044 | */ | |
1045 | } while (!(status & R1_READY_FOR_DATA) || | |
1046 | (R1_CURRENT_STATE(status) == R1_STATE_PRG)); | |
1047 | } | |
1048 | ||
1049 | if (brq->data.error) { | |
1050 | pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n", | |
1051 | req->rq_disk->disk_name, brq->data.error, | |
1052 | (unsigned)blk_rq_pos(req), | |
1053 | (unsigned)blk_rq_sectors(req), | |
1054 | brq->cmd.resp[0], brq->stop.resp[0]); | |
1055 | ||
1056 | if (rq_data_dir(req) == READ) { | |
67716327 AH |
1057 | if (ecc_err) |
1058 | return MMC_BLK_ECC_ERR; | |
d78d4a8a PF |
1059 | return MMC_BLK_DATA_ERR; |
1060 | } else { | |
1061 | return MMC_BLK_CMD_ERR; | |
1062 | } | |
1063 | } | |
1064 | ||
67716327 AH |
1065 | if (!brq->data.bytes_xfered) |
1066 | return MMC_BLK_RETRY; | |
d78d4a8a | 1067 | |
67716327 AH |
1068 | if (blk_rq_bytes(req) != brq->data.bytes_xfered) |
1069 | return MMC_BLK_PARTIAL; | |
1070 | ||
1071 | return MMC_BLK_SUCCESS; | |
d78d4a8a PF |
1072 | } |
1073 | ||
54d49d77 PF |
1074 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, |
1075 | struct mmc_card *card, | |
1076 | int disable_multi, | |
1077 | struct mmc_queue *mq) | |
1da177e4 | 1078 | { |
54d49d77 PF |
1079 | u32 readcmd, writecmd; |
1080 | struct mmc_blk_request *brq = &mqrq->brq; | |
1081 | struct request *req = mqrq->req; | |
1da177e4 | 1082 | struct mmc_blk_data *md = mq->data; |
1da177e4 | 1083 | |
f4c5522b AW |
1084 | /* |
1085 | * Reliable writes are used to implement Forced Unit Access and | |
1086 | * REQ_META accesses, and are supported only on MMCs. | |
65299a3b CH |
1087 | * |
1088 | * XXX: this really needs a good explanation of why REQ_META | |
1089 | * is treated special. | |
f4c5522b AW |
1090 | */ |
1091 | bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || | |
1092 | (req->cmd_flags & REQ_META)) && | |
1093 | (rq_data_dir(req) == WRITE) && | |
d0c97cfb | 1094 | (md->flags & MMC_BLK_REL_WR); |
f4c5522b | 1095 | |
54d49d77 PF |
1096 | memset(brq, 0, sizeof(struct mmc_blk_request)); |
1097 | brq->mrq.cmd = &brq->cmd; | |
1098 | brq->mrq.data = &brq->data; | |
1da177e4 | 1099 | |
54d49d77 PF |
1100 | brq->cmd.arg = blk_rq_pos(req); |
1101 | if (!mmc_card_blockaddr(card)) | |
1102 | brq->cmd.arg <<= 9; | |
1103 | brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; | |
1104 | brq->data.blksz = 512; | |
1105 | brq->stop.opcode = MMC_STOP_TRANSMISSION; | |
1106 | brq->stop.arg = 0; | |
1107 | brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; | |
1108 | brq->data.blocks = blk_rq_sectors(req); | |
6a79e391 | 1109 | |
54d49d77 PF |
1110 | /* |
1111 | * The block layer doesn't support all sector count | |
1112 | * restrictions, so we need to be prepared for too big | |
1113 | * requests. | |
1114 | */ | |
1115 | if (brq->data.blocks > card->host->max_blk_count) | |
1116 | brq->data.blocks = card->host->max_blk_count; | |
1da177e4 | 1117 | |
2bf22b39 PW |
1118 | if (brq->data.blocks > 1) { |
1119 | /* | |
1120 | * After a read error, we redo the request one sector | |
1121 | * at a time in order to accurately determine which | |
1122 | * sectors can be read successfully. | |
1123 | */ | |
1124 | if (disable_multi) | |
1125 | brq->data.blocks = 1; | |
1126 | ||
1127 | /* Some controllers can't do multiblock reads due to hw bugs */ | |
1128 | if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ && | |
1129 | rq_data_dir(req) == READ) | |
1130 | brq->data.blocks = 1; | |
1131 | } | |
d0c97cfb | 1132 | |
54d49d77 PF |
1133 | if (brq->data.blocks > 1 || do_rel_wr) { |
1134 | /* SPI multiblock writes terminate using a special | |
1135 | * token, not a STOP_TRANSMISSION request. | |
d0c97cfb | 1136 | */ |
54d49d77 PF |
1137 | if (!mmc_host_is_spi(card->host) || |
1138 | rq_data_dir(req) == READ) | |
1139 | brq->mrq.stop = &brq->stop; | |
1140 | readcmd = MMC_READ_MULTIPLE_BLOCK; | |
1141 | writecmd = MMC_WRITE_MULTIPLE_BLOCK; | |
1142 | } else { | |
1143 | brq->mrq.stop = NULL; | |
1144 | readcmd = MMC_READ_SINGLE_BLOCK; | |
1145 | writecmd = MMC_WRITE_BLOCK; | |
1146 | } | |
1147 | if (rq_data_dir(req) == READ) { | |
1148 | brq->cmd.opcode = readcmd; | |
1149 | brq->data.flags |= MMC_DATA_READ; | |
1150 | } else { | |
1151 | brq->cmd.opcode = writecmd; | |
1152 | brq->data.flags |= MMC_DATA_WRITE; | |
1153 | } | |
d0c97cfb | 1154 | |
54d49d77 PF |
1155 | if (do_rel_wr) |
1156 | mmc_apply_rel_rw(brq, card, req); | |
f4c5522b | 1157 | |
54d49d77 PF |
1158 | /* |
1159 | * Pre-defined multi-block transfers are preferable to | |
1160 | * open ended-ones (and necessary for reliable writes). | |
1161 | * However, it is not sufficient to just send CMD23, | |
1162 | * and avoid the final CMD12, as on an error condition | |
1163 | * CMD12 (stop) needs to be sent anyway. This, coupled | |
1164 | * with Auto-CMD23 enhancements provided by some | |
1165 | * hosts, means that the complexity of dealing | |
1166 | * with this is best left to the host. If CMD23 is | |
1167 | * supported by card and host, we'll fill sbc in and let | |
1168 | * the host deal with handling it correctly. This means | |
1169 | * that for hosts that don't expose MMC_CAP_CMD23, no | |
1170 | * change of behavior will be observed. | |
1171 | * | |
1172 | * N.B: Some MMC cards experience perf degradation. | |
1173 | * We'll avoid using CMD23-bounded multiblock writes for | |
1174 | * these, while retaining features like reliable writes. | |
1175 | */ | |
b146d26a | 1176 | |
54d49d77 PF |
1177 | if ((md->flags & MMC_BLK_CMD23) && |
1178 | mmc_op_multi(brq->cmd.opcode) && | |
1179 | (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) { | |
1180 | brq->sbc.opcode = MMC_SET_BLOCK_COUNT; | |
1181 | brq->sbc.arg = brq->data.blocks | | |
1182 | (do_rel_wr ? (1 << 31) : 0); | |
1183 | brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; | |
1184 | brq->mrq.sbc = &brq->sbc; | |
1185 | } | |
98ccf149 | 1186 | |
54d49d77 PF |
1187 | mmc_set_data_timeout(&brq->data, card); |
1188 | ||
1189 | brq->data.sg = mqrq->sg; | |
1190 | brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); | |
1191 | ||
1192 | /* | |
1193 | * Adjust the sg list so it is the same size as the | |
1194 | * request. | |
1195 | */ | |
1196 | if (brq->data.blocks != blk_rq_sectors(req)) { | |
1197 | int i, data_size = brq->data.blocks << 9; | |
1198 | struct scatterlist *sg; | |
1199 | ||
1200 | for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { | |
1201 | data_size -= sg->length; | |
1202 | if (data_size <= 0) { | |
1203 | sg->length += data_size; | |
1204 | i++; | |
1205 | break; | |
6a79e391 | 1206 | } |
6a79e391 | 1207 | } |
54d49d77 PF |
1208 | brq->data.sg_len = i; |
1209 | } | |
1210 | ||
ee8a43a5 PF |
1211 | mqrq->mmc_active.mrq = &brq->mrq; |
1212 | mqrq->mmc_active.err_check = mmc_blk_err_check; | |
1213 | ||
54d49d77 PF |
1214 | mmc_queue_bounce_pre(mqrq); |
1215 | } | |
6a79e391 | 1216 | |
67716327 AH |
1217 | static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, |
1218 | struct mmc_blk_request *brq, struct request *req, | |
1219 | int ret) | |
1220 | { | |
1221 | /* | |
1222 | * If this is an SD card and we're writing, we can first | |
1223 | * mark the known good sectors as ok. | |
1224 | * | |
1225 | * If the card is not SD, we can still ok written sectors | |
1226 | * as reported by the controller (which might be less than | |
1227 | * the real number of written sectors, but never more). | |
1228 | */ | |
1229 | if (mmc_card_sd(card)) { | |
1230 | u32 blocks; | |
1231 | ||
1232 | blocks = mmc_sd_num_wr_blocks(card); | |
1233 | if (blocks != (u32)-1) { | |
1234 | spin_lock_irq(&md->lock); | |
1235 | ret = __blk_end_request(req, 0, blocks << 9); | |
1236 | spin_unlock_irq(&md->lock); | |
1237 | } | |
1238 | } else { | |
1239 | spin_lock_irq(&md->lock); | |
1240 | ret = __blk_end_request(req, 0, brq->data.bytes_xfered); | |
1241 | spin_unlock_irq(&md->lock); | |
1242 | } | |
1243 | return ret; | |
1244 | } | |
1245 | ||
ee8a43a5 | 1246 | static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) |
54d49d77 PF |
1247 | { |
1248 | struct mmc_blk_data *md = mq->data; | |
1249 | struct mmc_card *card = md->queue.card; | |
1250 | struct mmc_blk_request *brq = &mq->mqrq_cur->brq; | |
67716327 | 1251 | int ret = 1, disable_multi = 0, retry = 0, type; |
d78d4a8a | 1252 | enum mmc_blk_status status; |
ee8a43a5 PF |
1253 | struct mmc_queue_req *mq_rq; |
1254 | struct request *req; | |
1255 | struct mmc_async_req *areq; | |
1da177e4 | 1256 | |
ee8a43a5 PF |
1257 | if (!rqc && !mq->mqrq_prev->req) |
1258 | return 0; | |
98ccf149 | 1259 | |
ee8a43a5 PF |
1260 | do { |
1261 | if (rqc) { | |
1262 | mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); | |
1263 | areq = &mq->mqrq_cur->mmc_active; | |
1264 | } else | |
1265 | areq = NULL; | |
1266 | areq = mmc_start_req(card->host, areq, (int *) &status); | |
1267 | if (!areq) | |
1268 | return 0; | |
1269 | ||
1270 | mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); | |
1271 | brq = &mq_rq->brq; | |
1272 | req = mq_rq->req; | |
67716327 | 1273 | type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; |
ee8a43a5 | 1274 | mmc_queue_bounce_post(mq_rq); |
98ccf149 | 1275 | |
d78d4a8a PF |
1276 | switch (status) { |
1277 | case MMC_BLK_SUCCESS: | |
1278 | case MMC_BLK_PARTIAL: | |
1279 | /* | |
1280 | * A block was successfully transferred. | |
1281 | */ | |
67716327 | 1282 | mmc_blk_reset_success(md, type); |
d78d4a8a PF |
1283 | spin_lock_irq(&md->lock); |
1284 | ret = __blk_end_request(req, 0, | |
1285 | brq->data.bytes_xfered); | |
1286 | spin_unlock_irq(&md->lock); | |
67716327 AH |
1287 | /* |
1288 | * If the blk_end_request function returns non-zero even | |
1289 | * though all data has been transferred and no errors | |
1290 | * were returned by the host controller, it's a bug. | |
1291 | */ | |
ee8a43a5 | 1292 | if (status == MMC_BLK_SUCCESS && ret) { |
a3c76eb9 | 1293 | pr_err("%s BUG rq_tot %d d_xfer %d\n", |
ee8a43a5 PF |
1294 | __func__, blk_rq_bytes(req), |
1295 | brq->data.bytes_xfered); | |
1296 | rqc = NULL; | |
1297 | goto cmd_abort; | |
1298 | } | |
d78d4a8a PF |
1299 | break; |
1300 | case MMC_BLK_CMD_ERR: | |
67716327 AH |
1301 | ret = mmc_blk_cmd_err(md, card, brq, req, ret); |
1302 | if (!mmc_blk_reset(md, card->host, type)) | |
1303 | break; | |
1304 | goto cmd_abort; | |
d78d4a8a PF |
1305 | case MMC_BLK_RETRY: |
1306 | if (retry++ < 5) | |
a01f3ccf | 1307 | break; |
67716327 | 1308 | /* Fall through */ |
d78d4a8a | 1309 | case MMC_BLK_ABORT: |
67716327 AH |
1310 | if (!mmc_blk_reset(md, card->host, type)) |
1311 | break; | |
4c2b8f26 | 1312 | goto cmd_abort; |
67716327 AH |
1313 | case MMC_BLK_DATA_ERR: { |
1314 | int err; | |
1315 | ||
1316 | err = mmc_blk_reset(md, card->host, type); | |
1317 | if (!err) | |
1318 | break; | |
1319 | if (err == -ENODEV) | |
1320 | goto cmd_abort; | |
1321 | /* Fall through */ | |
1322 | } | |
1323 | case MMC_BLK_ECC_ERR: | |
1324 | if (brq->data.blocks > 1) { | |
1325 | /* Redo read one sector at a time */ | |
1326 | pr_warning("%s: retrying using single block read\n", | |
1327 | req->rq_disk->disk_name); | |
1328 | disable_multi = 1; | |
1329 | break; | |
1330 | } | |
d78d4a8a PF |
1331 | /* |
1332 | * After an error, we redo I/O one sector at a | |
1333 | * time, so we only reach here after trying to | |
1334 | * read a single sector. | |
1335 | */ | |
1336 | spin_lock_irq(&md->lock); | |
1337 | ret = __blk_end_request(req, -EIO, | |
1338 | brq->data.blksz); | |
1339 | spin_unlock_irq(&md->lock); | |
ee8a43a5 PF |
1340 | if (!ret) |
1341 | goto start_new_req; | |
d78d4a8a | 1342 | break; |
a8ad82cc SRT |
1343 | case MMC_BLK_NOMEDIUM: |
1344 | goto cmd_abort; | |
4c2b8f26 RKAL |
1345 | } |
1346 | ||
ee8a43a5 PF |
1347 | if (ret) { |
1348 | /* | |
67716327 | 1349 | * In case of a incomplete request |
ee8a43a5 PF |
1350 | * prepare it again and resend. |
1351 | */ | |
1352 | mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); | |
1353 | mmc_start_req(card->host, &mq_rq->mmc_active, NULL); | |
1354 | } | |
1da177e4 LT |
1355 | } while (ret); |
1356 | ||
1da177e4 LT |
1357 | return 1; |
1358 | ||
a01f3ccf | 1359 | cmd_abort: |
1da177e4 | 1360 | spin_lock_irq(&md->lock); |
a8ad82cc SRT |
1361 | if (mmc_card_removed(card)) |
1362 | req->cmd_flags |= REQ_QUIET; | |
fd539832 KU |
1363 | while (ret) |
1364 | ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); | |
1da177e4 LT |
1365 | spin_unlock_irq(&md->lock); |
1366 | ||
ee8a43a5 PF |
1367 | start_new_req: |
1368 | if (rqc) { | |
1369 | mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); | |
1370 | mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL); | |
1371 | } | |
1372 | ||
1da177e4 LT |
1373 | return 0; |
1374 | } | |
1375 | ||
bd788c96 AH |
1376 | static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) |
1377 | { | |
1a258db6 AW |
1378 | int ret; |
1379 | struct mmc_blk_data *md = mq->data; | |
1380 | struct mmc_card *card = md->queue.card; | |
1381 | ||
ee8a43a5 PF |
1382 | if (req && !mq->mqrq_prev->req) |
1383 | /* claim host only for the first request */ | |
1384 | mmc_claim_host(card->host); | |
1385 | ||
371a689f AW |
1386 | ret = mmc_blk_part_switch(card, md); |
1387 | if (ret) { | |
0d7d85ca AH |
1388 | if (req) { |
1389 | spin_lock_irq(&md->lock); | |
1390 | __blk_end_request_all(req, -EIO); | |
1391 | spin_unlock_irq(&md->lock); | |
1392 | } | |
371a689f AW |
1393 | ret = 0; |
1394 | goto out; | |
1395 | } | |
1a258db6 | 1396 | |
ee8a43a5 PF |
1397 | if (req && req->cmd_flags & REQ_DISCARD) { |
1398 | /* complete ongoing async transfer before issuing discard */ | |
1399 | if (card->host->areq) | |
1400 | mmc_blk_issue_rw_rq(mq, NULL); | |
49804548 | 1401 | if (req->cmd_flags & REQ_SECURE) |
1a258db6 | 1402 | ret = mmc_blk_issue_secdiscard_rq(mq, req); |
49804548 | 1403 | else |
1a258db6 | 1404 | ret = mmc_blk_issue_discard_rq(mq, req); |
ee8a43a5 | 1405 | } else if (req && req->cmd_flags & REQ_FLUSH) { |
393f9a08 JC |
1406 | /* complete ongoing async transfer before issuing flush */ |
1407 | if (card->host->areq) | |
1408 | mmc_blk_issue_rw_rq(mq, NULL); | |
1a258db6 | 1409 | ret = mmc_blk_issue_flush(mq, req); |
49804548 | 1410 | } else { |
1a258db6 | 1411 | ret = mmc_blk_issue_rw_rq(mq, req); |
49804548 | 1412 | } |
1a258db6 | 1413 | |
371a689f | 1414 | out: |
ee8a43a5 PF |
1415 | if (!req) |
1416 | /* release host only when there are no more requests */ | |
1417 | mmc_release_host(card->host); | |
1a258db6 | 1418 | return ret; |
bd788c96 | 1419 | } |
1da177e4 | 1420 | |
a6f6c96b RK |
1421 | static inline int mmc_blk_readonly(struct mmc_card *card) |
1422 | { | |
1423 | return mmc_card_readonly(card) || | |
1424 | !(card->csd.cmdclass & CCC_BLOCK_WRITE); | |
1425 | } | |
1426 | ||
371a689f AW |
1427 | static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, |
1428 | struct device *parent, | |
1429 | sector_t size, | |
1430 | bool default_ro, | |
add710ea JR |
1431 | const char *subname, |
1432 | int area_type) | |
1da177e4 LT |
1433 | { |
1434 | struct mmc_blk_data *md; | |
1435 | int devidx, ret; | |
1436 | ||
5e71b7a6 OJ |
1437 | devidx = find_first_zero_bit(dev_use, max_devices); |
1438 | if (devidx >= max_devices) | |
1da177e4 LT |
1439 | return ERR_PTR(-ENOSPC); |
1440 | __set_bit(devidx, dev_use); | |
1441 | ||
dd00cc48 | 1442 | md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); |
a6f6c96b RK |
1443 | if (!md) { |
1444 | ret = -ENOMEM; | |
1445 | goto out; | |
1446 | } | |
1da177e4 | 1447 | |
f06c9153 AW |
1448 | /* |
1449 | * !subname implies we are creating main mmc_blk_data that will be | |
1450 | * associated with mmc_card with mmc_set_drvdata. Due to device | |
1451 | * partitions, devidx will not coincide with a per-physical card | |
1452 | * index anymore so we keep track of a name index. | |
1453 | */ | |
1454 | if (!subname) { | |
1455 | md->name_idx = find_first_zero_bit(name_use, max_devices); | |
1456 | __set_bit(md->name_idx, name_use); | |
add710ea | 1457 | } else |
f06c9153 AW |
1458 | md->name_idx = ((struct mmc_blk_data *) |
1459 | dev_to_disk(parent)->private_data)->name_idx; | |
1460 | ||
add710ea JR |
1461 | md->area_type = area_type; |
1462 | ||
a6f6c96b RK |
1463 | /* |
1464 | * Set the read-only status based on the supported commands | |
1465 | * and the write protect switch. | |
1466 | */ | |
1467 | md->read_only = mmc_blk_readonly(card); | |
1da177e4 | 1468 | |
5e71b7a6 | 1469 | md->disk = alloc_disk(perdev_minors); |
a6f6c96b RK |
1470 | if (md->disk == NULL) { |
1471 | ret = -ENOMEM; | |
1472 | goto err_kfree; | |
1473 | } | |
1da177e4 | 1474 | |
a6f6c96b | 1475 | spin_lock_init(&md->lock); |
371a689f | 1476 | INIT_LIST_HEAD(&md->part); |
a6f6c96b | 1477 | md->usage = 1; |
1da177e4 | 1478 | |
d09408ad | 1479 | ret = mmc_init_queue(&md->queue, card, &md->lock, subname); |
a6f6c96b RK |
1480 | if (ret) |
1481 | goto err_putdisk; | |
1da177e4 | 1482 | |
a6f6c96b RK |
1483 | md->queue.issue_fn = mmc_blk_issue_rq; |
1484 | md->queue.data = md; | |
d2b18394 | 1485 | |
fe6b4c88 | 1486 | md->disk->major = MMC_BLOCK_MAJOR; |
5e71b7a6 | 1487 | md->disk->first_minor = devidx * perdev_minors; |
a6f6c96b RK |
1488 | md->disk->fops = &mmc_bdops; |
1489 | md->disk->private_data = md; | |
1490 | md->disk->queue = md->queue.queue; | |
371a689f AW |
1491 | md->disk->driverfs_dev = parent; |
1492 | set_disk_ro(md->disk, md->read_only || default_ro); | |
a6f6c96b RK |
1493 | |
1494 | /* | |
1495 | * As discussed on lkml, GENHD_FL_REMOVABLE should: | |
1496 | * | |
1497 | * - be set for removable media with permanent block devices | |
1498 | * - be unset for removable block devices with permanent media | |
1499 | * | |
1500 | * Since MMC block devices clearly fall under the second | |
1501 | * case, we do not set GENHD_FL_REMOVABLE. Userspace | |
1502 | * should use the block device creation/destruction hotplug | |
1503 | * messages to tell when the card is present. | |
1504 | */ | |
1505 | ||
f06c9153 AW |
1506 | snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), |
1507 | "mmcblk%d%s", md->name_idx, subname ? subname : ""); | |
a6f6c96b | 1508 | |
e1defc4f | 1509 | blk_queue_logical_block_size(md->queue.queue, 512); |
371a689f | 1510 | set_capacity(md->disk, size); |
d0c97cfb | 1511 | |
f0d89972 AW |
1512 | if (mmc_host_cmd23(card->host)) { |
1513 | if (mmc_card_mmc(card) || | |
1514 | (mmc_card_sd(card) && | |
1515 | card->scr.cmds & SD_SCR_CMD23_SUPPORT)) | |
1516 | md->flags |= MMC_BLK_CMD23; | |
1517 | } | |
d0c97cfb AW |
1518 | |
1519 | if (mmc_card_mmc(card) && | |
1520 | md->flags & MMC_BLK_CMD23 && | |
1521 | ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || | |
1522 | card->ext_csd.rel_sectors)) { | |
1523 | md->flags |= MMC_BLK_REL_WR; | |
1524 | blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); | |
1525 | } | |
1526 | ||
371a689f AW |
1527 | return md; |
1528 | ||
1529 | err_putdisk: | |
1530 | put_disk(md->disk); | |
1531 | err_kfree: | |
1532 | kfree(md); | |
1533 | out: | |
1534 | return ERR_PTR(ret); | |
1535 | } | |
1536 | ||
1537 | static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) | |
1538 | { | |
1539 | sector_t size; | |
1540 | struct mmc_blk_data *md; | |
a6f6c96b | 1541 | |
85a18ad9 PO |
1542 | if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { |
1543 | /* | |
1544 | * The EXT_CSD sector count is in number or 512 byte | |
1545 | * sectors. | |
1546 | */ | |
371a689f | 1547 | size = card->ext_csd.sectors; |
85a18ad9 PO |
1548 | } else { |
1549 | /* | |
1550 | * The CSD capacity field is in units of read_blkbits. | |
1551 | * set_capacity takes units of 512 bytes. | |
1552 | */ | |
371a689f | 1553 | size = card->csd.capacity << (card->csd.read_blkbits - 9); |
85a18ad9 | 1554 | } |
371a689f | 1555 | |
add710ea JR |
1556 | md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL, |
1557 | MMC_BLK_DATA_AREA_MAIN); | |
1da177e4 | 1558 | return md; |
371a689f | 1559 | } |
a6f6c96b | 1560 | |
371a689f AW |
1561 | static int mmc_blk_alloc_part(struct mmc_card *card, |
1562 | struct mmc_blk_data *md, | |
1563 | unsigned int part_type, | |
1564 | sector_t size, | |
1565 | bool default_ro, | |
add710ea JR |
1566 | const char *subname, |
1567 | int area_type) | |
371a689f AW |
1568 | { |
1569 | char cap_str[10]; | |
1570 | struct mmc_blk_data *part_md; | |
1571 | ||
1572 | part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, | |
add710ea | 1573 | subname, area_type); |
371a689f AW |
1574 | if (IS_ERR(part_md)) |
1575 | return PTR_ERR(part_md); | |
1576 | part_md->part_type = part_type; | |
1577 | list_add(&part_md->part, &md->part); | |
1578 | ||
1579 | string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2, | |
1580 | cap_str, sizeof(cap_str)); | |
a3c76eb9 | 1581 | pr_info("%s: %s %s partition %u %s\n", |
371a689f AW |
1582 | part_md->disk->disk_name, mmc_card_id(card), |
1583 | mmc_card_name(card), part_md->part_type, cap_str); | |
1584 | return 0; | |
1585 | } | |
1586 | ||
e0c368d5 NJ |
1587 | /* MMC Physical partitions consist of two boot partitions and |
1588 | * up to four general purpose partitions. | |
1589 | * For each partition enabled in EXT_CSD a block device will be allocatedi | |
1590 | * to provide access to the partition. | |
1591 | */ | |
1592 | ||
371a689f AW |
1593 | static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) |
1594 | { | |
e0c368d5 | 1595 | int idx, ret = 0; |
371a689f AW |
1596 | |
1597 | if (!mmc_card_mmc(card)) | |
1598 | return 0; | |
1599 | ||
e0c368d5 NJ |
1600 | for (idx = 0; idx < card->nr_parts; idx++) { |
1601 | if (card->part[idx].size) { | |
1602 | ret = mmc_blk_alloc_part(card, md, | |
1603 | card->part[idx].part_cfg, | |
1604 | card->part[idx].size >> 9, | |
1605 | card->part[idx].force_ro, | |
add710ea JR |
1606 | card->part[idx].name, |
1607 | card->part[idx].area_type); | |
e0c368d5 NJ |
1608 | if (ret) |
1609 | return ret; | |
1610 | } | |
371a689f AW |
1611 | } |
1612 | ||
1613 | return ret; | |
1da177e4 LT |
1614 | } |
1615 | ||
1616 | static int | |
1617 | mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) | |
1618 | { | |
1da177e4 LT |
1619 | int err; |
1620 | ||
b855885e | 1621 | mmc_claim_host(card->host); |
0f8d8ea6 | 1622 | err = mmc_set_blocklen(card, 512); |
b855885e | 1623 | mmc_release_host(card->host); |
1da177e4 LT |
1624 | |
1625 | if (err) { | |
a3c76eb9 | 1626 | pr_err("%s: unable to set block size to 512: %d\n", |
0f8d8ea6 | 1627 | md->disk->disk_name, err); |
1da177e4 LT |
1628 | return -EINVAL; |
1629 | } | |
1630 | ||
1631 | return 0; | |
1632 | } | |
1633 | ||
371a689f AW |
1634 | static void mmc_blk_remove_req(struct mmc_blk_data *md) |
1635 | { | |
add710ea JR |
1636 | struct mmc_card *card; |
1637 | ||
371a689f | 1638 | if (md) { |
add710ea | 1639 | card = md->queue.card; |
371a689f AW |
1640 | if (md->disk->flags & GENHD_FL_UP) { |
1641 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); | |
add710ea JR |
1642 | if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && |
1643 | card->ext_csd.boot_ro_lockable) | |
1644 | device_remove_file(disk_to_dev(md->disk), | |
1645 | &md->power_ro_lock); | |
371a689f AW |
1646 | |
1647 | /* Stop new requests from getting into the queue */ | |
1648 | del_gendisk(md->disk); | |
1649 | } | |
1650 | ||
1651 | /* Then flush out any already in there */ | |
1652 | mmc_cleanup_queue(&md->queue); | |
1653 | mmc_blk_put(md); | |
1654 | } | |
1655 | } | |
1656 | ||
1657 | static void mmc_blk_remove_parts(struct mmc_card *card, | |
1658 | struct mmc_blk_data *md) | |
1659 | { | |
1660 | struct list_head *pos, *q; | |
1661 | struct mmc_blk_data *part_md; | |
1662 | ||
f06c9153 | 1663 | __clear_bit(md->name_idx, name_use); |
371a689f AW |
1664 | list_for_each_safe(pos, q, &md->part) { |
1665 | part_md = list_entry(pos, struct mmc_blk_data, part); | |
1666 | list_del(pos); | |
1667 | mmc_blk_remove_req(part_md); | |
1668 | } | |
1669 | } | |
1670 | ||
1671 | static int mmc_add_disk(struct mmc_blk_data *md) | |
1672 | { | |
1673 | int ret; | |
add710ea | 1674 | struct mmc_card *card = md->queue.card; |
371a689f AW |
1675 | |
1676 | add_disk(md->disk); | |
1677 | md->force_ro.show = force_ro_show; | |
1678 | md->force_ro.store = force_ro_store; | |
641c3187 | 1679 | sysfs_attr_init(&md->force_ro.attr); |
371a689f AW |
1680 | md->force_ro.attr.name = "force_ro"; |
1681 | md->force_ro.attr.mode = S_IRUGO | S_IWUSR; | |
1682 | ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); | |
1683 | if (ret) | |
add710ea JR |
1684 | goto force_ro_fail; |
1685 | ||
1686 | if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && | |
1687 | card->ext_csd.boot_ro_lockable) { | |
1688 | mode_t mode; | |
1689 | ||
1690 | if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) | |
1691 | mode = S_IRUGO; | |
1692 | else | |
1693 | mode = S_IRUGO | S_IWUSR; | |
1694 | ||
1695 | md->power_ro_lock.show = power_ro_lock_show; | |
1696 | md->power_ro_lock.store = power_ro_lock_store; | |
1697 | md->power_ro_lock.attr.mode = mode; | |
1698 | md->power_ro_lock.attr.name = | |
1699 | "ro_lock_until_next_power_on"; | |
1700 | ret = device_create_file(disk_to_dev(md->disk), | |
1701 | &md->power_ro_lock); | |
1702 | if (ret) | |
1703 | goto power_ro_lock_fail; | |
1704 | } | |
1705 | return ret; | |
1706 | ||
1707 | power_ro_lock_fail: | |
1708 | device_remove_file(disk_to_dev(md->disk), &md->force_ro); | |
1709 | force_ro_fail: | |
1710 | del_gendisk(md->disk); | |
371a689f AW |
1711 | |
1712 | return ret; | |
1713 | } | |
1714 | ||
c59d4473 CB |
1715 | #define CID_MANFID_SANDISK 0x2 |
1716 | #define CID_MANFID_TOSHIBA 0x11 | |
1717 | #define CID_MANFID_MICRON 0x13 | |
1718 | ||
6f60c222 AW |
1719 | static const struct mmc_fixup blk_fixups[] = |
1720 | { | |
c59d4473 CB |
1721 | MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk, |
1722 | MMC_QUIRK_INAND_CMD38), | |
1723 | MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk, | |
1724 | MMC_QUIRK_INAND_CMD38), | |
1725 | MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk, | |
1726 | MMC_QUIRK_INAND_CMD38), | |
1727 | MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk, | |
1728 | MMC_QUIRK_INAND_CMD38), | |
1729 | MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk, | |
1730 | MMC_QUIRK_INAND_CMD38), | |
d0c97cfb AW |
1731 | |
1732 | /* | |
1733 | * Some MMC cards experience performance degradation with CMD23 | |
1734 | * instead of CMD12-bounded multiblock transfers. For now we'll | |
1735 | * black list what's bad... | |
1736 | * - Certain Toshiba cards. | |
1737 | * | |
1738 | * N.B. This doesn't affect SD cards. | |
1739 | */ | |
c59d4473 | 1740 | MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, |
d0c97cfb | 1741 | MMC_QUIRK_BLK_NO_CMD23), |
c59d4473 | 1742 | MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, |
d0c97cfb | 1743 | MMC_QUIRK_BLK_NO_CMD23), |
c59d4473 | 1744 | MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, |
d0c97cfb | 1745 | MMC_QUIRK_BLK_NO_CMD23), |
6de5fc9c SNX |
1746 | |
1747 | /* | |
1748 | * Some Micron MMC cards needs longer data read timeout than | |
1749 | * indicated in CSD. | |
1750 | */ | |
c59d4473 | 1751 | MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, |
6de5fc9c SNX |
1752 | MMC_QUIRK_LONG_READ_TIME), |
1753 | ||
6f60c222 AW |
1754 | END_FIXUP |
1755 | }; | |
1756 | ||
1da177e4 LT |
1757 | static int mmc_blk_probe(struct mmc_card *card) |
1758 | { | |
371a689f | 1759 | struct mmc_blk_data *md, *part_md; |
1da177e4 | 1760 | int err; |
a7bbb573 PO |
1761 | char cap_str[10]; |
1762 | ||
912490db PO |
1763 | /* |
1764 | * Check that the card supports the command class(es) we need. | |
1765 | */ | |
1766 | if (!(card->csd.cmdclass & CCC_BLOCK_READ)) | |
1da177e4 LT |
1767 | return -ENODEV; |
1768 | ||
1da177e4 LT |
1769 | md = mmc_blk_alloc(card); |
1770 | if (IS_ERR(md)) | |
1771 | return PTR_ERR(md); | |
1772 | ||
1773 | err = mmc_blk_set_blksize(md, card); | |
1774 | if (err) | |
1775 | goto out; | |
1776 | ||
444122fd | 1777 | string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, |
a7bbb573 | 1778 | cap_str, sizeof(cap_str)); |
a3c76eb9 | 1779 | pr_info("%s: %s %s %s %s\n", |
1da177e4 | 1780 | md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), |
a7bbb573 | 1781 | cap_str, md->read_only ? "(ro)" : ""); |
1da177e4 | 1782 | |
371a689f AW |
1783 | if (mmc_blk_alloc_parts(card, md)) |
1784 | goto out; | |
1785 | ||
1da177e4 | 1786 | mmc_set_drvdata(card, md); |
6f60c222 AW |
1787 | mmc_fixup_device(card, blk_fixups); |
1788 | ||
371a689f AW |
1789 | if (mmc_add_disk(md)) |
1790 | goto out; | |
1791 | ||
1792 | list_for_each_entry(part_md, &md->part, part) { | |
1793 | if (mmc_add_disk(part_md)) | |
1794 | goto out; | |
1795 | } | |
1da177e4 LT |
1796 | return 0; |
1797 | ||
1798 | out: | |
371a689f AW |
1799 | mmc_blk_remove_parts(card, md); |
1800 | mmc_blk_remove_req(md); | |
1da177e4 LT |
1801 | return err; |
1802 | } | |
1803 | ||
1804 | static void mmc_blk_remove(struct mmc_card *card) | |
1805 | { | |
1806 | struct mmc_blk_data *md = mmc_get_drvdata(card); | |
1807 | ||
371a689f | 1808 | mmc_blk_remove_parts(card, md); |
ddd6fa7e AH |
1809 | mmc_claim_host(card->host); |
1810 | mmc_blk_part_switch(card, md); | |
1811 | mmc_release_host(card->host); | |
371a689f | 1812 | mmc_blk_remove_req(md); |
1da177e4 LT |
1813 | mmc_set_drvdata(card, NULL); |
1814 | } | |
1815 | ||
1816 | #ifdef CONFIG_PM | |
1817 | static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) | |
1818 | { | |
371a689f | 1819 | struct mmc_blk_data *part_md; |
1da177e4 LT |
1820 | struct mmc_blk_data *md = mmc_get_drvdata(card); |
1821 | ||
1822 | if (md) { | |
1823 | mmc_queue_suspend(&md->queue); | |
371a689f AW |
1824 | list_for_each_entry(part_md, &md->part, part) { |
1825 | mmc_queue_suspend(&part_md->queue); | |
1826 | } | |
1da177e4 LT |
1827 | } |
1828 | return 0; | |
1829 | } | |
1830 | ||
1831 | static int mmc_blk_resume(struct mmc_card *card) | |
1832 | { | |
371a689f | 1833 | struct mmc_blk_data *part_md; |
1da177e4 LT |
1834 | struct mmc_blk_data *md = mmc_get_drvdata(card); |
1835 | ||
1836 | if (md) { | |
1837 | mmc_blk_set_blksize(md, card); | |
371a689f AW |
1838 | |
1839 | /* | |
1840 | * Resume involves the card going into idle state, | |
1841 | * so current partition is always the main one. | |
1842 | */ | |
1843 | md->part_curr = md->part_type; | |
1da177e4 | 1844 | mmc_queue_resume(&md->queue); |
371a689f AW |
1845 | list_for_each_entry(part_md, &md->part, part) { |
1846 | mmc_queue_resume(&part_md->queue); | |
1847 | } | |
1da177e4 LT |
1848 | } |
1849 | return 0; | |
1850 | } | |
1851 | #else | |
1852 | #define mmc_blk_suspend NULL | |
1853 | #define mmc_blk_resume NULL | |
1854 | #endif | |
1855 | ||
1856 | static struct mmc_driver mmc_driver = { | |
1857 | .drv = { | |
1858 | .name = "mmcblk", | |
1859 | }, | |
1860 | .probe = mmc_blk_probe, | |
1861 | .remove = mmc_blk_remove, | |
1862 | .suspend = mmc_blk_suspend, | |
1863 | .resume = mmc_blk_resume, | |
1864 | }; | |
1865 | ||
1866 | static int __init mmc_blk_init(void) | |
1867 | { | |
9d4e98e9 | 1868 | int res; |
1da177e4 | 1869 | |
5e71b7a6 OJ |
1870 | if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) |
1871 | pr_info("mmcblk: using %d minors per device\n", perdev_minors); | |
1872 | ||
1873 | max_devices = 256 / perdev_minors; | |
1874 | ||
fe6b4c88 PO |
1875 | res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
1876 | if (res) | |
1da177e4 | 1877 | goto out; |
1da177e4 | 1878 | |
9d4e98e9 AM |
1879 | res = mmc_register_driver(&mmc_driver); |
1880 | if (res) | |
1881 | goto out2; | |
1da177e4 | 1882 | |
9d4e98e9 AM |
1883 | return 0; |
1884 | out2: | |
1885 | unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); | |
1da177e4 LT |
1886 | out: |
1887 | return res; | |
1888 | } | |
1889 | ||
1890 | static void __exit mmc_blk_exit(void) | |
1891 | { | |
1892 | mmc_unregister_driver(&mmc_driver); | |
fe6b4c88 | 1893 | unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); |
1da177e4 LT |
1894 | } |
1895 | ||
1896 | module_init(mmc_blk_init); | |
1897 | module_exit(mmc_blk_exit); | |
1898 | ||
1899 | MODULE_LICENSE("GPL"); | |
1900 | MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); | |
1901 |