/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

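/*
 * Walk every bio_vec in @bio and in the bios chained behind it and count
 * the physical segments the request will need, honouring the queue's
 * clustering, max-segment-size and segment-boundary limits.  The sizes of
 * the first and last segments are recorded in bi_seg_front_size /
 * bi_seg_back_size so that later request merges can be checked cheaply.
 */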
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since
			 * that might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

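/*
 * Return 1 if the last bio_vec of @bio and the first bio_vec of @nxt are
 * physically contiguous and the combined segment stays within the queue's
 * segment size and boundary limits, i.e. the two bios can share one
 * physical segment after a merge.
 */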
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

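/*
 * Map one bio_vec into the scatterlist: merge it into the current sg entry
 * when clustering is enabled and the segment size / boundary limits allow,
 * otherwise start a new scatterlist entry.
 */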
static void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec **bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*bvprv && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = bvec;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;
#if defined(FEATURE_STORAGE_PID_LOGGER)
	struct page_pid_logger *prev_logger = NULL;
#endif

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
/* #undef FEATURE_STORAGE_PID_LOGGER */
#if defined(FEATURE_STORAGE_PID_LOGGER)
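		/*
		 * Vendor-specific per-PID I/O accounting
		 * (FEATURE_STORAGE_PID_LOGGER): for every page mapped into
		 * the scatterlist, look up which PIDs touched the page
		 * (recorded elsewhere by this feature in the page_logger
		 * array) and charge this bio_vec's byte count to the
		 * matching slot of the per-queue-thread g_pid_logger table.
		 */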
		do {
			extern spinlock_t g_locker;
			extern unsigned char *page_logger;
			extern struct struct_pid_logger g_pid_logger[PID_ID_CNT];
			unsigned long flags;

			if (page_logger) {
				struct page_pid_logger *tmp_logger;
				unsigned long page_offset;
				int index, mmcqd_index;
				pid_t current_pid = 0;

				/* #if defined(CONFIG_FLATMEM) */
				/* page_offset = (unsigned long)((bvec->bv_page) - mem_map); */
				/* #else */
				page_offset = (unsigned long)(__page_to_pfn(bvec->bv_page)) - PHYS_PFN_OFFSET;
				/* #endif */
				tmp_logger = ((struct page_pid_logger *)page_logger) + page_offset;
				/* tmp_locker = ((struct page_pid_locker *)page_logger_lock) + page_offset; */
#if defined(CONFIG_FLATMEM)
				/* printk(KERN_INFO "hank merge pid1:%u pid2:%u bv_page:%p vmemmap:%p pfn:%ld %s\n", tmp_logger->pid1, tmp_logger->pid2, bvec->bv_page, vmemmap, (unsigned long)(__page_to_pfn(bvec->bv_page)), q->backing_dev_info.name); */
#else
				/* printk(KERN_INFO "hank merge pid1:%u pid2:%u bv_page:%x mem_map:%x pfn:%d %s\n", tmp_logger->pid1, tmp_logger->pid2, bvec->bv_page, mem_map, (unsigned long)(__page_to_pfn(bvec->bv_page)), q->backing_dev_info.name); */
#endif
				current_pid = current->pid;

				/* find the pid record slot for this queue thread */
				for (mmcqd_index = 0; mmcqd_index < PID_ID_CNT; mmcqd_index++) {
					/* printk(KERN_INFO "hank merge mmcqd_index:%d qcurrent_pid:%d current_pid:%d", mmcqd_index, g_pid_logger[mmcqd_index].current_pid, current_pid); */
					if (g_pid_logger[mmcqd_index].current_pid == 0 ||
					    g_pid_logger[mmcqd_index].current_pid == current_pid) {
						g_pid_logger[mmcqd_index].current_pid = current_pid;
						break;
					}
				}
				/* no free or matching slot */
				if (mmcqd_index == PID_ID_CNT)
					break;
				/*
				if (tmp_logger->pid1 == 0xFFFF && tmp_logger->pid2 == 0xFFFF)
					printk(KERN_INFO "hank merge fail offset:%d of:%d bytes:%d rw:%d", page_offset, bvec->bv_offset, nbytes, (rq->cmd_flags & REQ_WRITE));
				else
					printk(KERN_INFO "hank merge success offset:%d of:%d bytes:%d rw:%d", page_offset, bvec->bv_offset, nbytes, (rq->cmd_flags & REQ_WRITE));
				*/
				if (tmp_logger->pid1 != 0xFFFF) {
					spin_lock_irqsave(&g_locker, flags);
					for (index = 0; index < PID_LOGGER_COUNT; index++) {
						if (tmp_logger->pid1 == 0xFFFF)
							break;
						if (g_pid_logger[mmcqd_index].pid_logger[index] == 0 ||
						    g_pid_logger[mmcqd_index].pid_logger[index] == tmp_logger->pid1) {
							g_pid_logger[mmcqd_index].pid_logger[index] = tmp_logger->pid1;
							if (rq->cmd_flags & REQ_WRITE) {
								g_pid_logger[mmcqd_index].pid_logger_counter[index]++;
								g_pid_logger[mmcqd_index].pid_logger_length[index] += bvec->bv_len;
							} else {
								g_pid_logger[mmcqd_index].pid_logger_r_counter[index]++;
								g_pid_logger[mmcqd_index].pid_logger_r_length[index] += bvec->bv_len;
							}
							if (prev_logger && prev_logger != tmp_logger)
								prev_logger->pid1 = 0xFFFF;
							/* tmp_logger->pid1 = 0xFFFF; */
							break;
						}
					}
					spin_unlock_irqrestore(&g_locker, flags);
				}
				if (tmp_logger->pid2 != 0xFFFF) {
					spin_lock_irqsave(&g_locker, flags);
					for (index = 0; index < PID_LOGGER_COUNT; index++) {
						if (tmp_logger->pid2 == 0xFFFF)
							break;
						if (g_pid_logger[mmcqd_index].pid_logger[index] == 0 ||
						    g_pid_logger[mmcqd_index].pid_logger[index] == tmp_logger->pid2) {
							g_pid_logger[mmcqd_index].pid_logger[index] = tmp_logger->pid2;
							if (rq->cmd_flags & REQ_WRITE) {
								g_pid_logger[mmcqd_index].pid_logger_counter[index]++;
								g_pid_logger[mmcqd_index].pid_logger_length[index] += bvec->bv_len;
							} else {
								g_pid_logger[mmcqd_index].pid_logger_r_counter[index]++;
								g_pid_logger[mmcqd_index].pid_logger_r_length[index] += bvec->bv_len;
							}
							if (prev_logger && prev_logger != tmp_logger)
								prev_logger->pid2 = 0xFFFF;
							/* tmp_logger->pid2 = 0xFFFF; */
							break;
						}
					}
					spin_unlock_irqrestore(&g_locker, flags);
				}
				prev_logger = tmp_logger;
			}
		} while (0);

#endif
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
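
/*
 * Example usage (an illustrative sketch only, not code from this file):
 * a typical block driver sizes its table from rq->nr_phys_segments, lets
 * blk_rq_map_sg() fill it, then hands the result to the DMA API.  The
 * names my_dev, sgl and MY_MAX_SEGMENTS below are hypothetical.
 *
 *	struct scatterlist sgl[MY_MAX_SEGMENTS];
 *	int count;
 *
 *	sg_init_table(sgl, rq->nr_phys_segments);
 *	count = blk_rq_map_sg(q, rq, sgl);
 *	count = dma_map_sg(my_dev, sgl, count,
 *			   rq_data_dir(rq) == WRITE ?
 *			   DMA_TO_DEVICE : DMA_FROM_DEVICE);
 *	if (!count)
 *		goto map_failed;
 */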

/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries
 *
 * Will return the number of sg entries setup
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
		   struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct scatterlist *sg;
	int nsegs, cluster;
	unsigned long i;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	bvprv = NULL;
	sg = NULL;
	bio_for_each_segment(bvec, bio, i) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	} /* segments in bio */

	if (sg)
		sg_mark_end(sg);

	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
	return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the physical
	 * segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

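/*
 * Check whether @bio may be appended to the back of @req: the combined
 * request must stay within the max-sectors limit, and the resulting number
 * of physical segments must not exceed the queue's segment limit.
 */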
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check whether either of the requests is a re-queued
	 * request.  We can't merge them if they are.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append the bio list
	 * from next to req and release next.  ll_merge_requests_fn()
	 * will have updated the segment counts; update the sector
	 * count here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge.  We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}

int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
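
/*
 * Example (an illustrative sketch of how a caller is expected to combine
 * the two helpers above; the real logic lives in elv_merge() in
 * block/elevator.c and also consults the elevator's allow_merge hook):
 * a candidate request is first screened with blk_rq_merge_ok() and only
 * then positioned with blk_try_merge().
 *
 *	if (q->last_merge && blk_rq_merge_ok(q->last_merge, bio)) {
 *		switch (blk_try_merge(q->last_merge, bio)) {
 *		case ELEVATOR_BACK_MERGE:
 *			... append bio behind q->last_merge ...
 *			break;
 *		case ELEVATOR_FRONT_MERGE:
 *			... prepend bio in front of q->last_merge ...
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */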