/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#define	DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
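/*
 * With 512-byte sectors (SECTOR_SHIFT == 9) these evaluate to 128
 * sectors (64KB) and 2097152 sectors (1GB) respectively.
 */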

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * skipped if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is that the timestamp magic isn't perfect, and
 * will continue to think that the data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */
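
/*
 * A rough mapping of the steps above onto the functions below:
 * bio_detain() [step i], the dm_deferred_set quiescing [step ii],
 * schedule_copy() [step iii], process_prepared_mapping() [step iv] and
 * cell_defer()/cell_defer_no_holder() [step v].
 */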

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}
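
/*
 * build_data_key() keys a prison cell by a physical data block and
 * build_virtual_key() by a thin device's virtual block; the virtual
 * flag keeps the two key spaces from colliding for the same device.
 */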

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 3 modes.  Ordered in degraded order for comparisons.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};
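
/*
 * The ordering above is deliberate: each successive mode is more
 * degraded than the previous one, which is what allows mode values to
 * be compared (see, for example, the PM_WRITE check in
 * commit_or_fallback() below).
 */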

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	unsigned low_water_triggered:1;	/* A dm event has been sent */
	unsigned no_free_space:1;	/* A -ENOSPC warning has been issued */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;

	struct bio_list retry_on_resume_list;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void set_pool_mode(struct pool *pool, enum pool_mode mode);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_defer_no_holder_no_free(struct thin_c *tc,
					 struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

static void cell_error(struct pool *pool,
		       struct dm_bio_prison_cell *cell)
{
	dm_cell_error(pool->prison, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
};

static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, master);
	bio_list_init(master);

	while ((bio = bio_list_pop(&bios))) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		if (h->tc == tc)
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_list_add(master, bio);
	}
}

static void requeue_io(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	__requeue_bio_list(tc, &pool->deferred_bios);
	__requeue_bio_list(tc, &pool->retry_on_resume_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc.)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_sector = (block << pool->sectors_per_block_shift) |
				(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}
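
/*
 * Worked example (illustrative only): with 64KB blocks,
 * sectors_per_block is 128 and sectors_per_block_shift is 7.  A bio at
 * sector 1000 falls in virtual block 1000 >> 7 = 7, at offset
 * 1000 & 127 = 104.  If that virtual block is mapped to data block 42,
 * remap() sends the bio to sector (42 << 7) | 104 = 5480 of the pool
 * device.
 */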

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	unsigned quiesced:1;
	unsigned prepared:1;
	unsigned pass_discard:1;

	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;
	int err;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (m->quiesced && m->prepared) {
		list_add(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}
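
/*
 * A new mapping is only handed to the worker once it is both quiesced
 * (any in-flight I/O to the old, shared block has drained) and prepared
 * (the copy, zero or overwrite has finished); whichever completion path
 * fires second is the one that queues it via __maybe_add_mapping().
 */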

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_thin_new_mapping *m = context;
	struct pool *pool = m->tc->pool;

	m->err = read_err || write_err ? -EIO : 0;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = 1;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void overwrite_endio(struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;
	struct pool *pool = m->tc->pool;

	m->err = err;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = 1;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	cell_release(pool, cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

/*
 * Same as cell_defer above, except it omits the original holder of the cell.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	cell_release_no_holder(pool, cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio)
		m->bio->bi_end_io = m->saved_bi_end_io;
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio)
		bio->bi_end_io = m->saved_bi_end_io;

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		DMERR_LIMIT("%s: dm_thin_insert_block() failed: error = %d",
			    dm_device_name(pool->pool_md), r);
		set_pool_mode(pool, PM_READ_ONLY);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		cell_defer_no_holder(tc, m->cell);
		bio_endio(bio, 0);
	} else
		cell_defer(tc, m->cell);

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		remap_and_issue(tc, m->bio, m->data_block);
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}
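
/*
 * ensure_next_mapping() pre-allocates the mapping the next bio might
 * need.  It uses GFP_ATOMIC and so may fail; the worker copes by
 * deferring the remaining bios until some prepared mappings have been
 * processed (see process_deferred_bios()).
 */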

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *r = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	pool->next_mapping = NULL;

	return r;
}

static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	INIT_LIST_HEAD(&m->list);
	m->quiesced = 0;
	m->prepared = 0;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;
	m->err = 0;
	m->bio = NULL;

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		m->quiesced = 1;

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_dest);
	} else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = pool->sectors_per_block;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			cell_error(pool, cell);
		}
	}
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->origin_dev,
		      virt_block, data_dest, cell, bio);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	INIT_LIST_HEAD(&m->list);
	m->quiesced = 1;
	m->prepared = 0;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;
	m->err = 0;
	m->bio = NULL;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_block);
	} else {
		int r;
		struct dm_io_region to;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_block * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_zero() failed");
			cell_error(pool, cell);
		}
	}
}

static int commit(struct pool *pool)
{
	int r;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		DMERR_LIMIT("%s: commit failed: error = %d",
			    dm_device_name(pool->pool_md), r);

	return r;
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit_or_fallback(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) != PM_WRITE)
		return -EINVAL;

	r = commit(pool);
	if (r)
		set_pool_mode(pool, PM_READ_ONLY);

	return r;
}

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	unsigned long flags;
	struct pool *pool = tc->pool;

	/*
	 * Once no_free_space is set we must not allow allocation to succeed.
	 * Otherwise it is difficult to explain, debug, test and support.
	 */
	if (pool->no_free_space)
		return -ENOSPC;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r)
		return r;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = 1;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		(void) commit_or_fallback(pool);

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r)
			return r;

		/*
		 * If we still have no space we set a flag to avoid
		 * doing all this checking and return -ENOSPC.  This
		 * flag serves as a latch that disallows allocations from
		 * this pool until the admin takes action (e.g. resize or
		 * table reload).
		 */
		if (!free_blocks) {
			DMWARN("%s: no free space available.",
			       dm_device_name(pool->pool_md));
			spin_lock_irqsave(&pool->lock, flags);
			pool->no_free_space = 1;
			spin_unlock_irqrestore(&pool->lock, flags);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r)
		return r;

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	while ((bio = bio_list_pop(&bios)))
		retry_on_resume(bio);
}

static void process_discard(struct thin_c *tc, struct bio *bio)
{
	int r;
	unsigned long flags;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell, *cell2;
	struct dm_cell_key key, key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block.  This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (bio_detain(tc->pool, &key2, bio, &cell2)) {
			cell_defer_no_holder(tc, cell);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block.  We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->err = 0;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
				spin_lock_irqsave(&pool->lock, flags);
				list_add(&m->list, &pool->prepared_discards);
				spin_unlock_irqrestore(&pool->lock, flags);
				wake_worker(pool);
			}
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell2);

			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary.  So we submit the discard of a
			 * partial block appropriately.
			 */
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		no_space(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		set_pool_mode(pool, PM_READ_ONLY);
		cell_error(pool, cell);
		break;
	}
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result)
{
	struct dm_bio_prison_cell *cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	if (bio_data_dir(bio) == WRITE && bio->bi_size)
		break_sharing(tc, bio, block, &key, lookup_result, cell);
	else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		no_space(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		set_pool_mode(pool, PM_READ_ONLY);
		cell_error(pool, cell);
		break;
	}
}

static void process_bio(struct thin_c *tc, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;
	struct dm_thin_lookup_result lookup_result;

	/*
	 * If cell is already occupied, then the block is already
	 * being provisioned so we have nothing further to do here.
	 */
	build_virtual_key(tc->td, block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared) {
			process_shared_bio(tc, bio, block, &lookup_result);
			cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);

			remap_and_issue(tc, bio, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (bio_data_dir(bio) == READ && tc->origin_dev) {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);

			remap_to_origin_and_issue(tc, bio);
		} else
			provision_block(tc, bio, block, cell);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
			bio_io_error(bio);
		else {
			inc_all_io_entry(tc->pool, bio);
			remap_and_issue(tc, bio, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (rw != READ) {
			bio_io_error(bio);
			break;
		}

		if (tc->origin_dev) {
			inc_all_io_entry(tc->pool, bio);
			remap_to_origin_and_issue(tc, bio);
			break;
		}

		zero_fill_bio(bio);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
	bio_io_error(bio);
}

/*
 * FIXME: should we also commit due to size of transaction, measured in
 * metadata blocks?
 */
static int need_commit_due_to_time(struct pool *pool)
{
	return jiffies < pool->last_commit_jiffies ||
	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
}
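
/*
 * The "jiffies < pool->last_commit_jiffies" half of the test above is
 * presumably there to force a commit if jiffies has wrapped since the
 * last commit, so the periodic commit cannot stall across a wrap.
 */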

static void process_deferred_bios(struct pool *pool)
{
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_merge(&bios, &pool->deferred_bios);
	bio_list_init(&pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	while ((bio = bio_list_pop(&bios))) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
		struct thin_c *tc = h->tc;

		/*
		 * If we've got no free new_mapping structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (ensure_next_mapping(pool)) {
			spin_lock_irqsave(&pool->lock, flags);
			bio_list_merge(&pool->deferred_bios, &bios);
			spin_unlock_irqrestore(&pool->lock, flags);

			break;
		}

		if (bio->bi_rw & REQ_DISCARD)
			pool->process_discard(tc, bio);
		else
			pool->process_bio(tc, bio);
	}

	/*
	 * If there are any deferred flush bios, we must commit
	 * the metadata before issuing them.
	 */
	bio_list_init(&bios);
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_merge(&bios, &pool->deferred_flush_bios);
	bio_list_init(&pool->deferred_flush_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
		return;

	if (commit_or_fallback(pool)) {
		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);
		return;
	}
	pool->last_commit_jiffies = jiffies;

	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}

static void do_worker(struct work_struct *ws)
{
	struct pool *pool = container_of(ws, struct pool, worker);

	process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
	process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
	process_deferred_bios(pool);
}

/*
 * We want to commit periodically so that not too much
 * unwritten data builds up.
 */
static void do_waker(struct work_struct *ws)
{
	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
	wake_worker(pool);
	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}

/*----------------------------------------------------------------*/