block: use 32-bit blk_status_t on Alpha
include/linux/blk_types.h (GitHub/moto-9609/android_kernel_motorola_exynos9610.git)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

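/*
 * Illustrative sketch, not part of this header: a driver's completion
 * path typically stores one of the BLK_STS_* codes above in
 * bio->bi_status and then completes the bio.  bio_endio() is declared
 * in <linux/bio.h>; "my_driver_complete" is a hypothetical helper used
 * only for this example.
 *
 *	static void my_driver_complete(struct bio *bio, bool hw_error)
 *	{
 *		bio->bi_status = hw_error ? BLK_STS_IOERR : BLK_STS_OK;
 *		bio_endio(bio);
 *	}
 */
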
struct blk_issue_stat {
	u64 stat;
};

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	struct bvec_iter	bi_iter;

	atomic_t		__bi_remaining;
	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio.  Put on bio
	 * release.  Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	void			*bi_cg_private;
	struct blk_issue_stat	bi_issue_stat;
#endif
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
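
/*
 * Illustrative sketch, not part of this header: walking the data in a
 * bio goes through bi_iter rather than indexing bi_io_vec directly.
 * bio_for_each_segment() is provided by <linux/bio.h>; the function
 * below is a hypothetical example only.
 *
 *	static unsigned int count_bio_bytes(struct bio *bio)
 *	{
 *		struct bio_vec bvec;
 *		struct bvec_iter iter;
 *		unsigned int bytes = 0;
 *
 *		bio_for_each_segment(bvec, bio, iter)
 *			bytes += bvec.bv_len;
 *		return bytes;
 *	}
 */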

/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10	/* bio_endio() should trace the final completion
				 * of this bio. */
/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif
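
/*
 * Illustrative sketch, not part of this header: how the pool index is
 * packed into and recovered from bi_flags.  This is believed to mirror
 * what bio_alloc_bioset() does in block/bio.c; "idx" is the pool index
 * plus one, so that 0 can mean "no bvecs to be freed".
 *
 *	bio->bi_flags |= idx << BVEC_POOL_OFFSET;	// store
 *	idx = BVEC_POOL_IDX(bio);			// recover
 */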

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

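/*
 * Illustrative sketch, not part of this header: a combined opf value
 * splits into its operation and flag parts with the masks above (the
 * REQ_OP_* and REQ_* names are defined below in this file).
 *
 *	unsigned int opf   = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 *	unsigned int op    = opf & REQ_OP_MASK;		// REQ_OP_WRITE
 *	unsigned int flags = opf & ~REQ_OP_MASK;	// REQ_SYNC | REQ_FUA
 */
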
enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* get zone information */
	REQ_OP_ZONE_REPORT	= 4,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
		unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}
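
/*
 * Illustrative note, not part of this header: since bi_opf already
 * holds the operation and flags in one word, new code is expected to
 * assign it directly instead of calling the obsolete helper above:
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 */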

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
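
/*
 * Illustrative examples, not part of this header, following directly
 * from the definition of op_is_sync() above:
 *
 *	op_is_sync(REQ_OP_READ);		// true: reads are sync
 *	op_is_sync(REQ_OP_WRITE);		// false
 *	op_is_sync(REQ_OP_WRITE | REQ_SYNC);	// true
 *	op_is_sync(REQ_OP_WRITE | REQ_FUA);	// true
 */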

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
				       bool internal)
{
	blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);

	if (internal)
		ret |= BLK_QC_T_INTERNAL;

	return ret;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
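
/*
 * Illustrative sketch, not part of this header: a cookie built by
 * blk_tag_to_qc_t() round-trips through the accessors above.
 *
 *	blk_qc_t cookie = blk_tag_to_qc_t(42, 3, false);
 *
 *	blk_qc_t_to_tag(cookie);	// 42
 *	blk_qc_t_to_queue_num(cookie);	// 3
 *	blk_qc_t_is_internal(cookie);	// false
 */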

struct blk_rq_stat {
	s64 mean;
	u64 min;
	u64 max;
	s32 nr_samples;
	s32 nr_batch;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */