#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

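/*
 * Illustrative sketch only, not part of this header: how the two
 * limits above are meant to combine.  A process keeps its "batching"
 * privilege while it is under the request budget and still inside the
 * time window.  The helper and its parameters are hypothetical.
 */
static inline int example_within_batch(int nr_batched,
				       unsigned long batch_start)
{
	return nr_batched < BLK_BATCH_REQ &&
		time_before(jiffies, batch_start + BLK_BATCH_TIME);
}
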
extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void __blk_queue_free_tags(struct request_queue *q);

void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * The EH timer and IO completion will both attempt to 'grab' the
 * request; make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
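
/*
 * Illustrative sketch only (hypothetical caller, not part of this
 * header): the timeout handler and the normal completion path would
 * each try to claim the request; whichever test_and_set_bit() wins
 * processes it, and the loser backs off.
 */
static inline void example_try_complete(struct request *rq)
{
	if (blk_mark_rq_complete(rq))
		return;		/* the EH timer already owns this request */
	/* ... run the real completion here ... */
}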

struct io_context *current_io_context(gfp_t gfp_flags, int node);

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
void blk_recalc_rq_sectors(struct request *rq, int nsect);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep
 * the context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

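/*
 * Sketch (hypothetical helper, not part of this header) of how the two
 * thresholds above give hysteresis: congestion is signalled once the
 * number of used requests reaches the higher "on" threshold, and only
 * cleared again once it drops below the lower "off" threshold, so a
 * queue hovering near the limit does not flip state on every request.
 */
static inline int example_queue_congested(struct request_queue *q,
					  int used, int was_congested)
{
	if (used >= queue_congestion_on_threshold(q))
		return 1;
	if (used < queue_congestion_off_threshold(q))
		return 0;
	return was_congested;	/* in the hysteresis band: keep old state */
}
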
#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define rq_for_each_integrity_segment(bvl, _rq, _iter)	\
	__rq_for_each_bio(_iter.bio, _rq)		\
		bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)

#endif /* BLK_DEV_INTEGRITY */

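/*
 * Usage sketch (hypothetical helper, CONFIG_BLK_DEV_INTEGRITY only,
 * not part of this header): walk every integrity bio_vec of a request
 * with the iterator macro above.  Assumes each bio in the request
 * carries an integrity payload.
 */
#if defined(CONFIG_BLK_DEV_INTEGRITY)
static inline unsigned int example_count_integrity_segments(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec *bvl;
	unsigned int segs = 0;

	rq_for_each_integrity_segment(bvl, rq, iter)
		segs++;
	return segs;
}
#endif
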
static inline int blk_cpu_to_group(int cpu)
{
#ifdef CONFIG_SCHED_MC
	cpumask_t mask = cpu_coregroup_map(cpu);
	return first_cpu(mask);
#elif defined(CONFIG_SCHED_SMT)
	return first_cpu(per_cpu(cpu_sibling_map, cpu));
#else
	return cpu;
#endif
}

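/*
 * Sketch (hypothetical caller, not part of this header): completion
 * affinity would compare CPU groups rather than raw CPU ids, so a
 * completion raised on a sibling of the submitting CPU still counts
 * as local.
 */
static inline int example_completion_is_local(int submit_cpu, int this_cpu)
{
	return blk_cpu_to_group(submit_cpu) == blk_cpu_to_group(this_cpu);
}
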
#endif