/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have writeback cache, FLUSH and FUA don't make any
 * difference.  The requests are either completed immediately if there's no
 * data or executed as normal requests otherwise.
 *
 * If the device has writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is
 * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
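 * For example, on such a device a write carrying both REQ_FLUSH and REQ_FUA
 * is executed as PREFLUSH, DATA and then POSTFLUSH.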
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream of
 *     FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q);

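/*
 * Compute the set of REQ_FSEQ_* steps @rq needs, given its own cmd_flags
 * and the flush capabilities (@fflags) advertised by the queue.
 */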
static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

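/*
 * Return the next pending step of @rq's flush sequence, i.e. the lowest
 * REQ_FSEQ_* bit not yet set in @rq->flush.seq.
 */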
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

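/* Undo the flush-sequencing changes to @rq so it can be completed normally. */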
static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

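/*
 * Hand @rq to the dispatch machinery.  Returns %true if the request was put
 * directly on the legacy queue_head (the caller may need to kick the queue),
 * %false if it was sent through the blk-mq requeue list instead.
 */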
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		struct request_queue *q = rq->q;

		blk_mq_add_to_requeue_list(rq, add_front);
		blk_mq_kick_requeue_list(q);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
				   int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			q->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_flush_issue() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q);
	return kicked | queued;
}

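/*
 * Completion handler for the flush request itself.  Walks the requests that
 * were waiting on this flush and advances each to its next sequence step.
 */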
static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;

	if (q->mq_ops) {
		spin_lock_irqsave(&q->mq_flush_lock, flags);
		q->flush_rq->tag = -1;
	}

	running = &q->flush_queue[q->flush_running_idx];
	BUG_ON(q->flush_pending_idx == q->flush_running_idx);

	/* account completion of the flush request */
	q->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, seq, error);
	}

	/*
	 * Kick the queue to avoid stall for two cases:
	 * 1. Moving a request silently to an empty queue_head may stall the
	 * queue.
	 * 2. When a flush request is running in a non-queueable queue, the
	 * queue is held.  Restart the queue after the flush request finishes
	 * to avoid a stall.
	 * This function is called from the request completion path and calling
	 * directly into request_fn may confuse the driver.  Always use
	 * kblockd.
	 */
	if (queued || q->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	q->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q)
{
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);

	/* C1 described at the top of this file */
	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&q->flush_data_in_flight) &&
	    time_before(jiffies,
			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	q->flush_pending_idx ^= 1;

	blk_rq_init(q, q->flush_rq);
	if (q->mq_ops)
		blk_mq_clone_flush_request(q->flush_rq, first_rq);

	q->flush_rq->cmd_type = REQ_TYPE_FS;
	q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	q->flush_rq->rq_disk = first_rq->rq_disk;
	q->flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(q->flush_rq, false);
}

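/*
 * Completion handler for the data part of a sequenced FLUSH/FUA request
 * on the legacy (non-mq) path.
 */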
static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

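/*
 * blk-mq counterpart of flush_data_end_io().  Runs under mq_flush_lock and
 * kicks the hardware queue if the completed sequence step queued new work.
 */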
static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	unsigned long flags;

	ctx = rq->mq_ctx;
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&q->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions
 * or __blk_mq_run_hw_queue() to dispatch requests.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops) {
			blk_mq_insert_request(rq, false, false, true);
		} else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&q->mq_flush_lock);
		blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&q->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  If the WAIT flag is not passed, then the caller may check only
 *    what request was pushed in some internal queue for later handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;

	ret = submit_bio_wait(WRITE_FLUSH, bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

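/*
 * Allocate the per-queue flush request for blk-mq.  The allocation includes
 * the driver's per-command payload (tag_set->cmd_size), rounded up to a
 * cache line.
 */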
static int blk_mq_init_flush(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	spin_lock_init(&q->mq_flush_lock);

	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
				set->cmd_size, cache_line_size()),
				GFP_KERNEL);
	if (!q->flush_rq)
		return -ENOMEM;
	return 0;
}

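/* Allocate @q's flush_rq; blk-mq queues take the blk-mq specific path above. */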
int blk_init_flush(struct request_queue *q)
{
	if (q->mq_ops)
		return blk_mq_init_flush(q);

	q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
	if (!q->flush_rq)
		return -ENOMEM;

	return 0;
}

void blk_exit_flush(struct request_queue *q)
{
	kfree(q->flush_rq);
}