/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>

#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536
/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
        struct mmc_queue *mq = q->queuedata;

        /*
         * We only like normal block requests and discards.
         */
        if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
                blk_dump_rq_flags(req, "MMC bad request");
                return BLKPREP_KILL;
        }

        if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
                return BLKPREP_KILL;

        req->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
}
static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;
#ifdef MMC_ENABLED_EMPTY_QUEUE_FLUSH
        int stop_status = UN_FLUSHED;
#endif

        current->flags |= PF_MEMALLOC;

        down(&mq->thread_sem);
        do {
                struct request *req = NULL;
                struct mmc_queue_req *tmp;
                unsigned int cmd_flags = 0;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);
                mq->mqrq_cur->req = req;
                spin_unlock_irq(q->queue_lock);

                if (req || mq->mqrq_prev->req) {
#ifdef MMC_ENABLED_EMPTY_QUEUE_FLUSH
                        /* Cancel a pending delayed cache flush before issuing new I/O */
                        if (!mq->mqrq_prev->req && mq->card &&
                            mmc_card_mmc(mq->card) &&
                            mq->card->ext_csd.cache_ctrl) {
                                if (stop_status == FLUSHING) {
                                        mmc_stop_flush(mq->card);
                                        stop_status = UN_FLUSHED;
                                }
                        }
#endif
                        set_current_state(TASK_RUNNING);
                        cmd_flags = req ? req->cmd_flags : 0;
                        mq->issue_fn(mq, req);
                        if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
                                mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
                                continue; /* fetch again */
                        }

                        /*
                         * Current request becomes previous request
                         * and vice versa.
                         * In case of special requests, current request
                         * has been finished. Do not assign it to previous
                         * request.
                         */
                        if (cmd_flags & MMC_REQ_SPECIAL_MASK)
                                mq->mqrq_cur->req = NULL;

                        mq->mqrq_prev->brq.mrq.data = NULL;
                        mq->mqrq_prev->req = NULL;
                        tmp = mq->mqrq_prev;
                        mq->mqrq_prev = mq->mqrq_cur;
                        mq->mqrq_cur = tmp;
                } else {
#ifdef MMC_ENABLED_EMPTY_QUEUE_FLUSH
                        /* Queue ran empty: arm a delayed cache flush */
                        if ((stop_status == UN_FLUSHED) && mq->card &&
                            mmc_card_mmc(mq->card) &&
                            mq->card->ext_csd.cache_ctrl) {
                                mmc_start_delayed_flush(mq->card);
                                stop_status = FLUSHING;
                        }
#endif
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                }
        } while (1);
        up(&mq->thread_sem);

        return 0;
}
/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
        unsigned long flags;
        struct mmc_context_info *cntx;

        if (!mq) {
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->cmd_flags |= REQ_QUIET;
                        __blk_end_request_all(req, -EIO);
                }
                return;
        }

        cntx = &mq->card->host->context_info;
        if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
                /*
                 * New MMC request arrived when MMC thread may be
                 * blocked on the previous request to be complete
                 * with no current request fetched
                 */
                spin_lock_irqsave(&cntx->lock, flags);
                if (cntx->is_waiting_last_req) {
                        cntx->is_new_req = true;
                        wake_up_interruptible(&cntx->wait);
                }
                spin_unlock_irqrestore(&cntx->lock, flags);
        } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
                wake_up_process(mq->thread);
}
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
        struct scatterlist *sg;

        sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
        if (!sg)
                *err = -ENOMEM;
        else {
                *err = 0;
                sg_init_table(sg, sg_len);
        }

        return sg;
}
static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
        unsigned max_discard;

        max_discard = mmc_calc_max_discard(card);
        if (!max_discard)
                return;

        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
        q->limits.max_discard_sectors = max_discard;
        if (card->erased_byte == 0 && !mmc_can_discard(card))
                q->limits.discard_zeroes_data = 1;
        q->limits.discard_granularity = card->pref_erase << 9;
        /* granularity must not be greater than max. discard */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = 0;
        if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
                queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_ANY;
        int ret;
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = *mmc_dev(host)->dma_mask;

        mq->card = card;
        mq->queue = blk_init_queue(mmc_request_fn, lock);
        if (!mq->queue)
                return -ENOMEM;

#ifdef CONFIG_ZRAM
        /* Shrink readahead on eMMC devices with no more than 256 MiB of RAM */
        if (mmc_card_mmc(card) &&
            (totalram_pages << (PAGE_SHIFT - 10)) <= (256 * 1024))
                mq->queue->backing_dev_info.ra_pages =
                        (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
#endif /* CONFIG_ZRAM */

        mq->mqrq_cur = mqrq_cur;
        mq->mqrq_prev = mqrq_prev;
        mq->queue->queuedata = mq;

        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
        if (host->max_segs == 1) {
                unsigned int bouncesz;

                bouncesz = MMC_QUEUE_BOUNCESZ;

                if (bouncesz > host->max_req_size)
                        bouncesz = host->max_req_size;
                if (bouncesz > host->max_seg_size)
                        bouncesz = host->max_seg_size;
                if (bouncesz > (host->max_blk_count * 512))
                        bouncesz = host->max_blk_count * 512;

                if (bouncesz > 512) {
                        mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                        if (!mqrq_cur->bounce_buf) {
                                pr_warning("%s: unable to allocate bounce cur buffer\n",
                                           mmc_card_name(card));
                        }
                        mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                        if (!mqrq_prev->bounce_buf) {
                                pr_warning("%s: unable to allocate bounce prev buffer\n",
                                           mmc_card_name(card));
                                kfree(mqrq_cur->bounce_buf);
                                mqrq_cur->bounce_buf = NULL;
                        }
                }

                if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);

                        mqrq_cur->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_cur->bounce_sg =
                                mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->bounce_sg =
                                mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;
                }
        }
#endif

        if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_segments(mq->queue, host->max_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);

                mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;

                mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;
        }

        sema_init(&mq->thread_sem, 1);

        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                                 host->index, subname ? subname : "");

        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
                goto free_bounce_sg;
        }

        return 0;
 free_bounce_sg:
        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;
        kfree(mqrq_prev->bounce_sg);
        mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;
        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;

        kfree(mqrq_prev->sg);
        mqrq_prev->sg = NULL;
        kfree(mqrq_prev->bounce_buf);
        mqrq_prev->bounce_buf = NULL;

        blk_cleanup_queue(mq->queue);
        return ret;
}
void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;
        struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
        struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

        /* Make sure the queue isn't suspended, as that will deadlock */
        mmc_queue_resume(mq);

        /* Then terminate our worker thread */
        kthread_stop(mq->thread);

        /* Empty the queue */
        spin_lock_irqsave(q->queue_lock, flags);
        q->queuedata = NULL;
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;

        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;

        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;

        kfree(mqrq_prev->bounce_sg);
        mqrq_prev->bounce_sg = NULL;

        kfree(mqrq_prev->sg);
        mqrq_prev->sg = NULL;

        kfree(mqrq_prev->bounce_buf);
        mqrq_prev->bounce_buf = NULL;

        mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
        int ret = 0;

        mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
        if (!mqrq_cur->packed) {
                pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
                        mmc_card_name(card));
                ret = -ENOMEM;
                goto out;
        }

        mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
        if (!mqrq_prev->packed) {
                pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
                        mmc_card_name(card));
                kfree(mqrq_cur->packed);
                mqrq_cur->packed = NULL;
                ret = -ENOMEM;
                goto out;
        }

        INIT_LIST_HEAD(&mqrq_cur->packed->list);
        INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
        return ret;
}
void mmc_packed_clean(struct mmc_queue *mq)
{
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

        kfree(mqrq_cur->packed);
        mqrq_cur->packed = NULL;
        kfree(mqrq_prev->packed);
        mqrq_prev->packed = NULL;
}
/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
                mq->flags |= MMC_QUEUE_SUSPENDED;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);

                down(&mq->thread_sem);
        }
}
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (mq->flags & MMC_QUEUE_SUSPENDED) {
                mq->flags &= ~MMC_QUEUE_SUSPENDED;

                up(&mq->thread_sem);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
                                            struct mmc_packed *packed,
                                            struct scatterlist *sg,
                                            enum mmc_packed_type cmd_type)
{
        struct request *req;
        struct scatterlist *__sg = sg;
        unsigned int sg_len = 0;

        if (mmc_packed_wr(cmd_type)) {
                unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
                unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
                unsigned int len, remain, offset = 0;
                u8 *buf = (u8 *)packed->cmd_hdr;

                /* Map the packed command header, split across segments if needed */
                remain = hdr_sz;
                do {
                        len = min(remain, max_seg_sz);
                        sg_set_buf(__sg, buf + offset, len);
                        offset += len;
                        remain -= len;
                        (__sg++)->page_link &= ~0x02;
                        sg_len++;
                } while (remain);
        }

        list_for_each_entry(req, &packed->list, queuelist) {
                sg_len += blk_rq_map_sg(mq->queue, req, __sg);
                __sg = sg + (sg_len - 1);
                (__sg++)->page_link &= ~0x02;
        }
        sg_mark_end(sg + (sg_len - 1));
        return sg_len;
}
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
        unsigned int sg_len;
        size_t buflen;
        struct scatterlist *sg;
        enum mmc_packed_type cmd_type;
        int i;

        cmd_type = mqrq->cmd_type;

        if (!mqrq->bounce_buf) {
                if (mmc_packed_cmd(cmd_type))
                        return mmc_queue_packed_map_sg(mq, mqrq->packed,
                                                       mqrq->sg, cmd_type);
                else
                        return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
        }

        BUG_ON(!mqrq->bounce_sg);

        if (mmc_packed_cmd(cmd_type))
                sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
                                                 mqrq->bounce_sg, cmd_type);
        else
                sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

        mqrq->bounce_sg_len = sg_len;

        buflen = 0;
        for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
                buflen += sg->length;

        sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

        return 1;
}
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != WRITE)
                return;

        sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}
/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != READ)
                return;

        sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}