/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
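/*
 * Count the physical segments needed to map the bio(s) of a request,
 * honouring the queue's max segment size, segment boundary mask and
 * clustering capability. Also caches the size of the first and last
 * segment in bi_seg_front_size/bi_seg_back_size so that request merging
 * can later check whether two adjoining bios would form an oversized
 * segment.
 */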
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since
			 * that might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}
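/*
 * Recompute and cache the number of physical segments of a request after
 * its bio chain has changed, e.g. after a merge or a partial completion.
 */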
void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
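/*
 * Check whether the last segment of @bio can be physically merged with
 * the first segment of @nxt: the queue must support clustering, the
 * combined segment must not exceed the max segment size, and the two
 * ends must be physically contiguous without straddling a segment
 * boundary.
 */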
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}
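/*
 * Add one bio_vec to the scatterlist being built: either grow the current
 * sg entry when the queue allows clustering and the limits are respected,
 * or start a new entry. Shared by blk_rq_map_sg() and blk_bio_map_sg().
 */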
static void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec **bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*bvprv && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = bvec;
}
/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;
#if defined(FEATURE_STORAGE_PID_LOGGER)
	struct page_pid_logger *prev_logger = NULL;
#endif

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
//#undef FEATURE_STORAGE_PID_LOGGER
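		/*
		 * Vendor (MediaTek) instrumentation: attribute each mapped
		 * page to the pids recorded in page_logger so per-process
		 * I/O counts and lengths accumulate in g_pid_logger. This is
		 * a reconstructed sketch: the NULL guard on page_logger, the
		 * flags/current_pid declarations and the no-free-slot skip
		 * below are assumptions, not verified against a vendor tree.
		 */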
#if defined(FEATURE_STORAGE_PID_LOGGER)
		{
			extern spinlock_t g_locker;
			extern unsigned char *page_logger;
			extern struct struct_pid_logger g_pid_logger[PID_ID_CNT];
			unsigned long flags;	/* assumed declaration */
			struct page_pid_logger *tmp_logger;
			unsigned long page_offset;
			int index, mmcqd_index;
			pid_t current_pid;	/* assumed declaration */

			if (page_logger) {	/* assumed NULL guard */
//#if defined(CONFIG_FLATMEM)
//page_offset = (unsigned long)((bvec->bv_page) - mem_map);
				page_offset = (unsigned long)(__page_to_pfn(bvec->bv_page)) - PHYS_PFN_OFFSET;
				tmp_logger = ((struct page_pid_logger *)page_logger) + page_offset;
				//tmp_locker =((struct page_pid_locker *)page_logger_lock) + page_offset;
#if defined(CONFIG_FLATMEM)
				//printk(KERN_INFO"hank merge pid1:%u pid2:%u bv_page:%p vmemmap:%p pfn:%ld %s \n", tmp_logger->pid1, tmp_logger->pid2, bvec->bv_page, vmemmap, (unsigned long)(__page_to_pfn(bvec->bv_page)), q->backing_dev_info.name);
#else
				//printk(KERN_INFO"hank merge pid1:%u pid2:%u bv_page:%x mem_map:%x pfn:%d %s \n", tmp_logger->pid1, tmp_logger->pid2, bvec->bv_page, mem_map, (unsigned long)(__page_to_pfn(bvec->bv_page)), q->backing_dev_info.name);
#endif
				current_pid = current->pid;
				/* find the matching pid record array */
				for (mmcqd_index = 0; mmcqd_index < PID_ID_CNT; mmcqd_index++) {
					//printk(KERN_INFO"hank merge mmcqd_index:%d qcurrent_pid:%d current_pid:%d", mmcqd_index, g_pid_logger[mmcqd_index].current_pid, current_pid);
					if (g_pid_logger[mmcqd_index].current_pid == 0 || g_pid_logger[mmcqd_index].current_pid == current_pid) {
						g_pid_logger[mmcqd_index].current_pid = current_pid;
						break;
					}
				}
				/* no free or matching slot: skip accounting (assumed handling) */
				if (mmcqd_index == PID_ID_CNT)
					goto skip_pid_logger;
				/*
				 * Debug residue, disabled: nbytes is not in scope here.
				 * if (tmp_logger->pid1 == 0XFFFF && tmp_logger->pid2 == 0XFFFF)
				 *	printk(KERN_INFO"hank merge fail offset:%d of:%d bytes:%d rw:%d", page_offset, bvec->bv_offset, nbytes, (rq->cmd_flags & REQ_WRITE));
				 * else
				 *	printk(KERN_INFO"hank merge success offset:%d of:%d bytes:%d rw:%d", page_offset, bvec->bv_offset, nbytes, (rq->cmd_flags & REQ_WRITE));
				 */
				if (tmp_logger->pid1 != 0xFFFF) {
					spin_lock_irqsave(&g_locker, flags);
					for (index = 0; index < PID_LOGGER_COUNT; index++) {
						if (tmp_logger->pid1 == 0xFFFF)
							break;
						if (g_pid_logger[mmcqd_index].pid_logger[index] == 0 || g_pid_logger[mmcqd_index].pid_logger[index] == tmp_logger->pid1) {
							g_pid_logger[mmcqd_index].pid_logger[index] = tmp_logger->pid1;
							if (rq->cmd_flags & REQ_WRITE) {
								g_pid_logger[mmcqd_index].pid_logger_counter[index]++;
								g_pid_logger[mmcqd_index].pid_logger_length[index] += bvec->bv_len;
							} else {
								g_pid_logger[mmcqd_index].pid_logger_r_counter[index]++;
								g_pid_logger[mmcqd_index].pid_logger_r_length[index] += bvec->bv_len;
							}
							if (prev_logger && prev_logger != tmp_logger)
								prev_logger->pid1 = 0XFFFF;
							//tmp_logger->pid1 = 0XFFFF;
							break;
						}
					}
					spin_unlock_irqrestore(&g_locker, flags);
				}
				if (tmp_logger->pid2 != 0xFFFF) {
					spin_lock_irqsave(&g_locker, flags);
					for (index = 0; index < PID_LOGGER_COUNT; index++) {
						if (tmp_logger->pid2 == 0xFFFF)
							break;
						if (g_pid_logger[mmcqd_index].pid_logger[index] == 0 || g_pid_logger[mmcqd_index].pid_logger[index] == tmp_logger->pid2) {
							g_pid_logger[mmcqd_index].pid_logger[index] = tmp_logger->pid2;
							if (rq->cmd_flags & REQ_WRITE) {
								g_pid_logger[mmcqd_index].pid_logger_counter[index]++;
								g_pid_logger[mmcqd_index].pid_logger_length[index] += bvec->bv_len;
							} else {
								g_pid_logger[mmcqd_index].pid_logger_r_counter[index]++;
								g_pid_logger[mmcqd_index].pid_logger_r_length[index] += bvec->bv_len;
							}
							if (prev_logger && prev_logger != tmp_logger)
								prev_logger->pid2 = 0XFFFF;
							//tmp_logger->pid2 = 0XFFFF;
							break;
						}
					}
					spin_unlock_irqrestore(&g_locker, flags);
				}
				prev_logger = tmp_logger;
skip_pid_logger:
				;
			}
		}
#endif
	} /* segments in rq */
	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
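/*
 * Usage sketch (illustrative, not from this file): a block driver
 * typically maps a request just before programming DMA, e.g.
 *
 *	struct scatterlist sgl[MY_MAX_SEGS];	// MY_MAX_SEGS is hypothetical
 *
 *	sg_init_table(sgl, MY_MAX_SEGS);
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *	nents = dma_map_sg(dev, sgl, nents, rq_data_dir(rq) == WRITE ?
 *			   DMA_TO_DEVICE : DMA_FROM_DEVICE);
 *
 * where MY_MAX_SEGS must be at least queue_max_segments(q).
 */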
/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries
 *
 * Will return the number of sg entries setup
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
		   struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct scatterlist *sg;
	int nsegs, cluster;
	unsigned long i;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	bvprv = NULL;
	sg = NULL;
	bio_for_each_segment(bvec, bio, i) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	} /* segments in bio */

	if (sg)
		sg_mark_end(sg);

	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);

	return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);
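/*
 * Account a bio that starts a new hardware segment on @req, failing the
 * merge when the queue's segment limit would be exceeded.
 */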
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}
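/*
 * ll_back_merge_fn()/ll_front_merge_fn() decide whether @bio may be
 * appended to the tail or prepended to the head of @req without
 * exceeding the request's max sector count; on failure the request is
 * marked REQ_NOMERGE so further merge attempts are not made.
 */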
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}
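/*
 * Check whether two whole requests can be merged: neither may be a
 * re-queued (special) request, and the combined size and segment count
 * must stay within the queue limits. When the boundary segments of the
 * two requests are physically contiguous, they collapse into one and
 * total_phys_segments is decremented accordingly.
 */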
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests are re-queued
	 * requests. Can't merge them if they are.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}
/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged. Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios. It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}
/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding. This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}
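/*
 * attempt_back_merge()/attempt_front_merge() ask the elevator for the
 * request sorted after/before @rq and try to merge the two.
 */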
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}
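/*
 * Cheap feasibility checks deciding whether @bio may be merged into @rq
 * at all; blk_try_merge() below then picks front vs back merge by sector
 * arithmetic.
 */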
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}
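/*
 * Worked example: if @rq covers sectors [s, s + n) and @bio starts at
 * sector b and spans m sectors, then
 *
 *	s + n == b  =>  back merge  (bio directly follows rq)
 *	b + m == s  =>  front merge (bio directly precedes rq)
 *
 * and anything else is no merge.
 */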
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}