/*
 * Block data types and constants.  Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

#ifdef CONFIG_BLOCK
/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	int			bi_error;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, command, etc */
	unsigned short		bi_ioprio;

	struct bvec_iter	bi_iter;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio.  Put on bio
	 * release.  Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};

#define BIO_OP_SHIFT	(8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS)
#define bio_flags(bio)	((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1))
#define bio_op(bio)	((bio)->bi_opf >> BIO_OP_SHIFT)

#define bio_set_op_attrs(bio, op, op_flags) do {			\
	if (__builtin_constant_p(op))					\
		BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS));		\
	else								\
		WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS));		\
	if (__builtin_constant_p(op_flags))				\
		BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT));	\
	else								\
		WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT));	\
	(bio)->bi_opf = bio_flags(bio);					\
	(bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT);			\
	(bio)->bi_opf |= (op_flags);					\
} while (0)

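/*
 * Usage sketch (illustrative only, not part of this header): a caller
 * wanting a sync write with forced unit access could set bi_opf through
 * the accessor above and read it back with bio_op()/bio_flags():
 *
 *	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_FUA);
 *
 *	if (bio_op(bio) == REQ_OP_WRITE && (bio_flags(bio) & REQ_FUA))
 *		handle the write-through case;
 */
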
#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

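/*
 * Illustrative sketch (an assumption, not spelled out in this header):
 * a reset helper built on BIO_RESET_BYTES would clear only the fields
 * laid out before bi_max_vecs, e.g.
 *
 *	memset(bio, 0, BIO_RESET_BYTES);
 *
 * leaving bi_max_vecs and everything after it untouched, as the comment
 * in struct bio above requires.
 */
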
/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */

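/*
 * Example (illustrative only): the values above are bit numbers in
 * bi_flags, not masks, so a test looks like
 *
 *	if (bio->bi_flags & (1 << BIO_CLONED))
 *		the bio does not own its data pages;
 */
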
/*
 * Flags starting here get preserved by bio_reset() - this includes
 * BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	10

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 4 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(4)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)

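/*
 * Example (illustrative only): a bio whose bvecs came from pool 3 stores
 * 3 + 1 = 4 in the top BVEC_POOL_BITS of bi_flags, so
 *
 *	BVEC_POOL_IDX(bio)	evaluates to 4,
 *
 * while a value of 0 means there are no bvecs to free.  Callers that
 * index the pool array would subtract 1 first (an assumption about the
 * callers, not spelled out in this header).
 */
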
#endif /* CONFIG_BLOCK */

/*
 * Request flags.  For use in the cmd_flags field of struct request, and in
 * bi_opf of struct bio.  Note that some flags are only valid in either one.
 */
enum rq_flag_bits {
	/* common flags */
	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */

	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */

	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */

	/* bio only flags */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */

	/* request only flags */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests and also
				   for requests for which the SCSI "quiesce"
				   state must be ignored. */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_FLUSH_SEQ,	/* request for flush sequence */
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
	__REQ_PM,		/* runtime pm request */
	__REQ_HASHED,		/* on IO scheduler merge hash */
	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
	(REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
	 REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE)
#define REQ_CLONE_MASK		REQ_COMMON_MASK

/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ)

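/*
 * Usage sketch (illustrative only): merge code can veto a candidate by
 * testing these bits on either side, e.g.
 *
 *	if ((rq->cmd_flags & REQ_NOMERGE_FLAGS) ||
 *	    (bio->bi_opf & REQ_NOMERGE_FLAGS))
 *		do not attempt the merge;
 */
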
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED		(1ULL << __REQ_THROTTLED)

#define REQ_SORTED		(1ULL << __REQ_SORTED)
#define REQ_SOFTBARRIER		(1ULL << __REQ_SOFTBARRIER)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_STARTED		(1ULL << __REQ_STARTED)
#define REQ_DONTPREP		(1ULL << __REQ_DONTPREP)
#define REQ_QUEUED		(1ULL << __REQ_QUEUED)
#define REQ_ELVPRIV		(1ULL << __REQ_ELVPRIV)
#define REQ_FAILED		(1ULL << __REQ_FAILED)
#define REQ_QUIET		(1ULL << __REQ_QUIET)
#define REQ_PREEMPT		(1ULL << __REQ_PREEMPT)
#define REQ_ALLOCED		(1ULL << __REQ_ALLOCED)
#define REQ_COPY_USER		(1ULL << __REQ_COPY_USER)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_FLUSH_SEQ		(1ULL << __REQ_FLUSH_SEQ)
#define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
#define REQ_PM			(1ULL << __REQ_PM)
#define REQ_HASHED		(1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)

enum req_op {
	REQ_OP_READ,
	REQ_OP_WRITE,
	REQ_OP_DISCARD,		/* request to discard sectors */
	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
	REQ_OP_WRITE_SAME,	/* write same block many times */
	REQ_OP_FLUSH,		/* request for cache flush */
	REQ_OP_ZONE_REPORT,	/* Get zone information */
	REQ_OP_ZONE_RESET,	/* Reset a zone write pointer */
};

#define REQ_OP_BITS 3

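/*
 * Usage sketch (illustrative only): the operation occupies the top
 * REQ_OP_BITS of bi_opf, so submission paths can dispatch on bio_op():
 *
 *	switch (bio_op(bio)) {
 *	case REQ_OP_READ:
 *	case REQ_OP_WRITE:
 *		transfer data;
 *		break;
 *	case REQ_OP_DISCARD:
 *		discard the range;
 *		break;
 *	default:
 *		reject or ignore;
 *	}
 */
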
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE	-1U
#define BLK_QC_T_SHIFT	16

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return cookie >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

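/*
 * Example (illustrative only): a cookie built for tag 5 on hardware
 * queue 2 packs the queue number into the high bits:
 *
 *	blk_qc_t cookie = blk_tag_to_qc_t(5, 2);
 *
 *	blk_qc_t_to_queue_num(cookie)	== 2
 *	blk_qc_t_to_tag(cookie)		== 5
 *	blk_qc_t_valid(BLK_QC_T_NONE)	== false
 */
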
#endif /* __LINUX_BLK_TYPES_H */