/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"
#include <linux/mmc/mmc.h>

#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

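/*
 * Worker thread for a single MMC queue. It pulls requests off the block
 * layer queue and hands them to issue_fn. The two mmc_queue_req slots
 * (mqrq_cur and mqrq_prev) are swapped on each iteration so that a new
 * request can be prepared while the previous one is still completing
 * (asynchronous request handling). When the queue is empty, the thread
 * drops thread_sem and sleeps until mmc_request_fn() wakes it up.
 */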
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
#ifdef MMC_ENABLED_EMPTY_QUEUE_FLUSH
#define UN_FLUSHED 0
#define FLUSHING 1
	int stop_status = UN_FLUSHED;
#endif

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;
		unsigned int cmd_flags = 0;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
#ifdef MMC_ENABLED_EMPTY_QUEUE_FLUSH
			/* A request arrived: cancel any pending delayed cache flush. */
			if (!mq->mqrq_prev->req && mq->card &&
			    mmc_card_mmc(mq->card) &&
			    mq->card->ext_csd.cache_ctrl) {
				if (stop_status == FLUSHING) {
					mmc_stop_flush(mq->card);
					stop_status = UN_FLUSHED;
				}
			}
#endif
			set_current_state(TASK_RUNNING);
			cmd_flags = req ? req->cmd_flags : 0;
			mq->issue_fn(mq, req);
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			tmp = mq->mqrq_prev;
			mq->mqrq_prev = mq->mqrq_cur;
			mq->mqrq_cur = tmp;
		} else {
#ifdef MMC_ENABLED_EMPTY_QUEUE_FLUSH
			/* Queue went idle: schedule a delayed cache flush. */
			if (stop_status == UN_FLUSHED && mq->card &&
			    mmc_card_mmc(mq->card) &&
			    mq->card->ext_csd.cache_ctrl) {
				mmc_start_delayed_flush(mq->card);
				stop_status = FLUSHING;
			}
#endif
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

	if (!mq) {
		/* The queue is being torn down; fail all requests quietly. */
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * A new request arrived while the MMC thread may still be
		 * blocked waiting for the previous request to complete,
		 * with no current request fetched.
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

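/*
 * Allocate and initialise a scatterlist with sg_len entries. On success
 * *err is 0; on failure *err is set to -ENOMEM and NULL is returned.
 */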
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

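/*
 * Derive the block layer's discard limits from the card's erase
 * capabilities: maximum discard size, discard granularity, whether
 * discarded data reads back as zeroes, and secure-discard support.
 */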
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_ANY;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

#ifdef CONFIG_ZRAM
	/* On systems with 256 MiB of RAM or less, trim the default readahead. */
	if (mmc_card_mmc(card) &&
	    (totalram_pages << (PAGE_SHIFT - 10)) <= (256 * 1024))
		mq->queue->backing_dev_info.ra_pages =
			(VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
#endif /* CONFIG_ZRAM */
	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

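	/*
	 * Hosts that can only handle a single segment get a bounce
	 * buffer (at most MMC_QUEUE_BOUNCESZ bytes) so that larger,
	 * scattered requests can still be issued as one contiguous
	 * transfer.
	 */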
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		/* Clamp the bounce buffer to what the host can transfer. */
		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warning("%s: unable to allocate bounce cur buffer\n",
					   mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warning("%s: unable to allocate bounce prev buffer\n",
					   mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
				 host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

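/*
 * A typical caller (sketch, modelled on the MMC block driver; the md
 * structure and mmc_blk_issue_rq are illustrative) embeds a struct
 * mmc_queue in its per-device data and pairs mmc_init_queue() with
 * mmc_cleanup_queue():
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *	if (ret)
 *		goto err;
 *	md->queue.issue_fn = mmc_blk_issue_rq;
 *	md->queue.data = md;
 */
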
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

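/*
 * Allocate the packed-command bookkeeping for both request slots, used
 * when the card supports packed commands (several block requests merged
 * into one command). Returns 0 on success or -ENOMEM on failure.
 */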
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;

	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}

void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

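/*
 * Map a packed command onto a scatterlist. For packed writes the packed
 * command header is mapped first, then each queued request in turn.
 * blk_rq_map_sg() marks the last entry it fills as the end of the list,
 * so that end bit (0x02 in page_link) is cleared after each step and
 * the true end is marked only once the whole list has been built.
 */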
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;
	struct request *req;

	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;

		/* Map the packed command header, split across segments if needed. */
		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
			(__sg++)->page_link &= ~0x02;
			sg_len++;
		} while (remain);
	}

	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		(__sg++)->page_link &= ~0x02;
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;

	cmd_type = mqrq->cmd_type;

	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	/* The bounce buffer appears to the host as a single segment. */
	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			  mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			    mqrq->bounce_buf, mqrq->sg[0].length);
}