/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
 */
struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
		unsigned long		state;		/* BLK_MQ_S_* flags */
	} ____cacheline_aligned_in_smp;

	struct delayed_work	run_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	void			*sched_data;
	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct sbitmap		ctx_map;

	struct blk_mq_ctx	*dispatch_from;

	struct blk_mq_ctx	**ctxs;
	unsigned int		nr_ctx;

	wait_queue_entry_t	dispatch_wait;
	atomic_t		wait_index;

	struct blk_mq_tags	*tags;
	struct blk_mq_tags	*sched_tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;
	unsigned int		nr_expired;

	struct hlist_node	cpuhp_dead;
	struct kobject		kobj;

	unsigned long		poll_considered;
	unsigned long		poll_invoked;
	unsigned long		poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
#endif

	/* Must be the last member - see also blk_mq_hw_ctx_size(). */
	struct srcu_struct	srcu[0];
};

struct blk_mq_tag_set {
	unsigned int		*mq_map;
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};

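/*
 * Usage sketch (not part of this header): how a hypothetical driver
 * might fill out its tag set and create a queue from it. The names
 * my_mq_ops, my_cmd and my_dev are illustrative assumptions.
 *
 *	static int my_dev_init_queue(struct my_dev *dev)
 *	{
 *		dev->tag_set.ops = &my_mq_ops;
 *		dev->tag_set.nr_hw_queues = 1;
 *		dev->tag_set.queue_depth = 64;
 *		dev->tag_set.numa_node = NUMA_NO_NODE;
 *		dev->tag_set.cmd_size = sizeof(struct my_cmd);
 *		dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *		dev->tag_set.driver_data = dev;
 *
 *		if (blk_mq_alloc_tag_set(&dev->tag_set))
 *			return -ENOMEM;
 *
 *		dev->q = blk_mq_init_queue(&dev->tag_set);
 *		if (IS_ERR(dev->q)) {
 *			blk_mq_free_tag_set(&dev->tag_set);
 *			return PTR_ERR(dev->q);
 *		}
 *		return 0;
 *	}
 */
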
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
		const struct blk_mq_queue_data *);
typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
		unsigned int);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Reserve budget before queueing a request; once .queue_rq is
	 * run, it is the driver's responsibility to release the
	 * reserved budget. The failure case of .get_budget must also
	 * be handled, to avoid I/O deadlocks.
	 */
	get_budget_fn		*get_budget;
	put_budget_fn		*put_budget;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn			*poll;

	softirq_done_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is reserved for
	 * setting up the flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;

	/* Called from inside blk_get_request() */
	void (*initialize_rq_fn)(struct request *rq);

	map_queues_fn		*map_queues;

#ifdef CONFIG_BLK_DEBUG_FS
	/*
	 * Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};

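/*
 * Usage sketch (illustrative only): a minimal .queue_rq and the ops
 * table wiring it up. my_dev, my_cmd, my_hw_full and my_hw_submit are
 * assumed driver-private names; a real driver would typically also set
 * .complete, .init_request and friends. Completion is signalled later,
 * e.g. from the IRQ handler, via blk_mq_complete_request().
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct my_dev *dev = hctx->queue->queuedata;
 *		struct my_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 *
 *		if (my_hw_full(dev))
 *			return BLK_STS_RESOURCE;
 *
 *		blk_mq_start_request(bd->rq);
 *		my_hw_submit(dev, cmd);
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 */
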
enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

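/*
 * Usage sketch: the tag allocation policy travels inside the BLK_MQ_F_*
 * flags word. A driver asking for round-robin tag allocation (the
 * BLK_TAG_ALLOC_* policies come from blkdev.h) could set:
 *
 *	set->flags = BLK_MQ_F_SHOULD_MERGE |
 *		     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);
 *
 * and the core recovers the policy with BLK_MQ_FLAG_TO_ALLOC_POLICY().
 */
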
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* allocate internal/sched tag */
	BLK_MQ_REQ_INTERNAL	= (__force blk_mq_req_flags_t)(1 << 2),
	/* set RQF_PREEMPT */
	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
};

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

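/*
 * Usage sketch: allocating a request directly, e.g. for a driver
 * internal command, failing instead of sleeping when tags run out:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_mq_free_request(rq);
 */
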
enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

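/*
 * Usage sketch: a driver that funnels completions from several hardware
 * queues through one path can split the unique tag back apart:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */
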
int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);

bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);
int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
		int (reinit_request)(void *, struct request *));

int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

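/*
 * Usage sketch: freezing drains all in-flight requests and blocks new
 * ones, while quiescing only stops dispatch. The usual pattern around a
 * configuration change (my_dev_update_limits is a hypothetical helper):
 *
 *	blk_mq_freeze_queue(q);
 *	my_dev_update_limits(dev);
 *	blk_mq_unfreeze_queue(q);
 */
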
/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}

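/*
 * Usage sketch: with cmd_size set in the tag set, each allocation is
 * laid out as [struct request][PDU], so the two helpers above are exact
 * inverses (my_cmd is an assumed per-request driver structure):
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 */
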
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

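/*
 * Usage sketch: walking every hardware queue of a request queue, e.g.
 * from teardown or debug code:
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		pr_debug("hctx %u on node %u\n", hctx->queue_num,
 *			 hctx->numa_node);
 */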
#endif