/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)
/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT		(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)		((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {				\
	WARN_ON(prio >= (1 << IOPRIO_BITS));				\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);			\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)
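
/*
 * A minimal usage sketch (illustrative, not part of this header): tag a
 * bio with the best-effort priority class before submission, using the
 * IOPRIO_PRIO_VALUE() helper from <linux/ioprio.h>.
 *
 *	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 0));
 *	if (bio_prio_valid(bio))
 *		pr_debug("bio prio %lu\n", bio_prio(bio));
 */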

/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors((bio)))

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio->bi_vcnt)
		return bio_iovec(bio)->bv_len;
	else /* dataless requests such as discard */
		return bio->bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio->bi_vcnt)
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}
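
/*
 * A short sketch of these accessors (illustrative only): compute the
 * sector range a bio covers, then zero its current buffer. As noted
 * above, bio_data() is only valid for lowmem pages.
 *
 *	sector_t first = bio->bi_sector;
 *	sector_t beyond = bio_end_sector(bio);
 *
 *	if (bio_has_data(bio))
 *		memset(bio_data(bio), 0, bio_cur_bytes(bio));
 */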

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +	\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr)	kunmap_atomic(addr)
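
/*
 * Paired usage sketch (illustrative; "scratch" is a placeholder buffer):
 * the mapping is atomic, so nothing between map and unmap may sleep.
 *
 *	char *buf = __bio_kmap_atomic(bio, bio->bi_idx);
 *
 *	memcpy(scratch, buf, bio_cur_bytes(bio));
 *	__bio_kunmap_atomic(buf);
 */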

/*
 * merge helpers etc
 */

#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, e.g. for virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))

#define bio_io_error(bio) bio_endio((bio), -EIO)

/*
 * drivers should not use the __ version unless they _really_ know what
 * they're doing
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)				\
	for (i = 0;							\
	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
	     i++)

#define bio_for_each_segment(bvl, bio, i)				\
	for (i = (bio)->bi_idx;						\
	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
	     i++)
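
/*
 * Typical iteration sketch (illustrative): walk the remaining segments
 * of a bio; bvl is the cursor, i the segment index, and each segment
 * spans at most one page.
 *
 *	struct bio_vec *bvl;
 *	int i;
 *
 *	bio_for_each_segment(bvl, bio, i)
 *		pr_debug("seg %d: %u bytes at page offset %u\n",
 *			 i, bvl->bv_len, bvl->bv_offset);
 */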

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would then be freed memory by the time the
 * if (bio->bi_flags ...) test runs
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	sector_t		bip_sector;	/* virtual start sector */

	void			*bip_buf;	/* generated integrity data */
	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned int		bip_size;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_idx;	/* current bip_vec index */
	unsigned		bip_owns_buf:1;	/* should free bip_buf */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0]; /* embedded bvec array */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 * in bio2.bi_private
 */
struct bio_pair {
	struct bio			bio1, bio2;
	struct bio_vec			bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload	bip1, bip2;
	struct bio_vec			iv1, iv2;
#endif
	atomic_t			cnt;
	int				error;
};
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);
extern void bio_trim(struct bio *bio, int offset, int size);
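
/*
 * Split sketch (illustrative; mirrors how raid0-style remapping drivers
 * use it): split a single-page bio at a boundary and submit both halves;
 * releasing the pair drops the split's references.
 *
 *	struct bio_pair *bp = bio_split(bio, first_sectors);
 *
 *	generic_make_request(&bp->bio1);
 *	generic_make_request(&bp->bio2);
 *	bio_pair_release(bp);
 */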

extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}
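
/*
 * Allocation sketch (illustrative; bdev, sector, page and my_end_io are
 * placeholders): build a one-page bio from fs_bio_set and submit it
 * asynchronously; the completion handler owns the final bio_put().
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(WRITE, bio);
 */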

extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(int rw, struct bio *bio);
extern void bio_advance(struct bio *, unsigned);
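
/*
 * Synchronous variant (a sketch): submit_bio_wait() blocks until the bio
 * completes and returns its error status, so no bi_end_io is needed.
 *
 *	int err = submit_bio_wait(READ, bio);
 *
 *	if (err)
 *		pr_err("bio read failed: %d\n", err);
 */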

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);

extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
				 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *, struct sg_iovec *,
				     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif
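
/*
 * Paired usage sketch (illustrative; "scratch" is a placeholder buffer):
 * local interrupts are off between map and unmap, so keep the critical
 * section short and never sleep inside it.
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memcpy(scratch, buf, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */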

static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio && bio->bi_vcnt)
		return true;

	return false;
}

static inline bool bio_is_rw(struct bio *bio)
{
	if (!bio_has_data(bio))
		return false;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return false;

	return true;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_rw & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
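
/*
 * Typical deferral pattern (a sketch; bio_to_defer is a placeholder): a
 * remapping driver queues bios it cannot handle right now on a bio_list
 * and drains the list later, e.g. from a workqueue.
 *
 *	struct bio_list deferred = BIO_EMPTY_LIST;
 *	struct bio *bio;
 *
 *	bio_list_add(&deferred, bio_to_defer);
 *	...
 *	while ((bio = bio_list_pop(&deferred)))
 *		generic_make_request(bio);
 */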

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t *bio_pool;
	mempool_t *bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
	mempool_t *bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};
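
/*
 * Lifecycle sketch (illustrative; "orig" is a placeholder bio): a
 * stacking driver creates a private bio_set at init time so its
 * allocations cannot starve under memory pressure, clones against it,
 * and tears it down on exit.
 *
 *	struct bio_set *bs = bioset_create(BIO_POOL_SIZE, 0);
 *	struct bio *clone = bio_clone_bioset(orig, GFP_NOIO, bs);
 *	...
 *	bioset_free(bs);
 */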

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

/*
 * a small number of entries is fine; this pool is not performance
 * critical, it only needs to guarantee forward progress for splits
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
#define bip_vec(bip)		bip_vec_idx(bip, 0)

#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
	     i < (bip)->bip_vcnt;					\
	     bvl++, i++)

#define bip_for_each_vec(bvl, bip, i)					\
	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
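
/*
 * Iteration sketch (illustrative): walk the integrity segments attached
 * to a bio, analogous to bio_for_each_segment() for data pages.
 *
 *	struct bio_vec *iv;
 *	int i;
 *
 *	bip_for_each_vec(iv, bio->bi_integrity, i)
 *		pr_debug("integrity seg %d: %u bytes\n", i, iv->bv_len);
 */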

#define bio_integrity(bio) (bio->bi_integrity != NULL)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline int bio_integrity(struct bio *bio)
{
	return 0;
}

static inline int bio_integrity_enabled(struct bio *bio)
{
	return 0;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
				       int sectors)
{
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
}

static inline void bio_integrity_init(void)
{
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */