/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>

#include "queue.h"
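
/*
 * Default size of the bounce buffer, used when the host controller is
 * limited to a single segment (see CONFIG_MMC_BLOCK_BOUNCE below).
 */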
#define MMC_QUEUE_BOUNCESZ 65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

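/*
 * The per-queue worker thread: fetches requests from the block layer
 * and hands them to mq->issue_fn(). The two mmc_queue_req slots
 * (current and previous) are swapped each iteration so that a new
 * request can be prepared while the previous one is still in flight.
 */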
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
#ifdef MMC_ENABLED_EMPTY_QUEUE_FLUSH
#define UN_FLUSHED 0
#define FLUSHING 1
	int stop_status = UN_FLUSHED;
#endif
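
	/*
	 * Allow this thread to dip into the emergency memory reserves so
	 * that writeback to the card cannot deadlock waiting for memory.
	 */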
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;
		unsigned int cmd_flags = 0;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
#ifdef MMC_ENABLED_EMPTY_QUEUE_FLUSH
			if (!mq->mqrq_prev->req && mq->card &&
			    mmc_card_mmc(mq->card) &&
			    mq->card->ext_csd.cache_ctrl) {
				if (stop_status == FLUSHING) {
					mmc_stop_flush(mq->card);
					stop_status = UN_FLUSHED;
				}
			}
#endif
			set_current_state(TASK_RUNNING);
			cmd_flags = req ? req->cmd_flags : 0;
			mq->issue_fn(mq, req);
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			tmp = mq->mqrq_prev;
			mq->mqrq_prev = mq->mqrq_cur;
			mq->mqrq_cur = tmp;
		} else {
#ifdef MMC_ENABLED_EMPTY_QUEUE_FLUSH
			if ((stop_status == UN_FLUSHED) && mq->card &&
			    mmc_card_mmc(mq->card) &&
			    mq->card->ext_csd.cache_ctrl) {
				mmc_start_delayed_flush(mq->card);
				stop_status = FLUSHING;
			}
#endif
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

	if (!mq) {
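		/*
		 * The queue is being torn down (queuedata was cleared in
		 * mmc_cleanup_queue()); fail any request that still comes in.
		 */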
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * A new request arrived while the queue thread may be
		 * blocked waiting for the previous request to complete,
		 * with no current request fetched. Wake the waiter so
		 * it can pick up the new request.
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req) {
		wake_up_process(mq->thread);
	}
}

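/*
 * Allocate and initialise a scatterlist of sg_len entries. On failure,
 * *err is set to -ENOMEM and NULL is returned.
 */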
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg) {
		*err = -ENOMEM;
	} else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

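/*
 * Derive the queue's discard limits from the card's erase capabilities.
 */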
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_ANY;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

#ifdef CONFIG_ZRAM
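	/*
	 * On devices with 256 MiB of RAM or less, shrink readahead to
	 * the VM minimum to reduce memory pressure.
	 */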
	if (mmc_card_mmc(card) &&
	    (totalram_pages << (PAGE_SHIFT - 10)) <= (256 * 1024))
		mq->queue->backing_dev_info.ra_pages =
			(VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
#endif /* CONFIG_ZRAM */
	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warn("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warn("%s: unable to allocate bounce prev buffer\n",
					mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

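/*
 * Tear down a request queue: stop the worker thread, fail any
 * remaining requests, and free the bounce and scatterlist resources.
 */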
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

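/*
 * Allocate the mmc_packed structures used to build packed commands for
 * both request slots. Returns 0 on success or -ENOMEM on failure.
 */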
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;

	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}

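/*
 * Free the mmc_packed structures allocated by mmc_packed_init().
 */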
void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

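/*
 * Map a packed command (and, for writes, its command header) into the
 * given scatterlist. Returns the number of scatterlist entries used.
 */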
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;
	struct request *req;

	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;

		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
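			/*
			 * Clear any stale end-of-list marker (bit 0x02);
			 * the list is terminated once, below.
			 */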
			(__sg++)->page_link &= ~0x02;
			sg_len++;
		} while (remain);
	}

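	/* Map the data of each packed request after the (optional) header */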
	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		(__sg++)->page_link &= ~0x02;
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;

	cmd_type = mqrq->cmd_type;

	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}