/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>

#define	DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define COMMIT_PERIOD HZ
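/*
 * How long the pool queues IO after running out of data space before
 * erroring it.  (The default below; the runtime value lives in
 * no_space_timeout_secs.)
 */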
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that the data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
enum lock_space {
	VIRTUAL,
	PHYSICAL
};

static void build_key(struct dm_thin_device *td, enum lock_space ls,
		      dm_block_t b, dm_block_t e, struct dm_cell_key *key)
{
	key->virtual = (ls == VIRTUAL);
	key->dev = dm_thin_dev_id(td);
	key->block_begin = b;
	key->block_end = e;
}

static void build_data_key(struct dm_thin_device *td, dm_block_t b,
			   struct dm_cell_key *key)
{
	build_key(td, PHYSICAL, b, b + 1llu, key);
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	build_key(td, VIRTUAL, b, b + 1llu, key);
}

/*----------------------------------------------------------------*/

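/*
 * The worker arms the throttle at the start of each pass
 * (throttle_work_start).  If the pass runs longer than THROTTLE_THRESHOLD,
 * throttle_work_update() takes the semaphore for writing, blocking anyone
 * submitting new work through throttle_lock() (a read lock) until
 * throttle_work_complete() releases it.  This stops IO submitters from
 * starving the worker.
 */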
#define THROTTLE_THRESHOLD (1 * HZ)

struct throttle {
	struct rw_semaphore lock;
	unsigned long threshold;
	bool throttle_applied;
};

static void throttle_init(struct throttle *t)
{
	init_rwsem(&t->lock);
	t->throttle_applied = false;
}

static void throttle_work_start(struct throttle *t)
{
	t->threshold = jiffies + THROTTLE_THRESHOLD;
}

static void throttle_work_update(struct throttle *t)
{
	if (!t->throttle_applied && jiffies > t->threshold) {
		down_write(&t->lock);
		t->throttle_applied = true;
	}
}

static void throttle_work_complete(struct throttle *t)
{
	if (t->throttle_applied) {
		t->throttle_applied = false;
		up_write(&t->lock);
	}
}

static void throttle_lock(struct throttle *t)
{
	down_read(&t->lock);
}

static void throttle_unlock(struct throttle *t)
{
	up_read(&t->lock);
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes, ordered here from least to most degraded so
 * that the modes can be compared numerically.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

#define CELL_SORT_ARRAY_SIZE 8192

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */
	bool suspended:1;

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct throttle throttle;
	struct work_struct worker;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_cell_fn process_cell;
	process_cell_fn process_discard_cell;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;

	struct dm_bio_prison_cell **cell_sort_array;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	sector_t origin_size;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	struct mapped_device *thin_md;

	bool requeue_mode:1;
	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/**
 * __blkdev_issue_discard_async - queue a discard with async completion
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 * @parent_bio: parent discard bio that all sub discards get chained to
 *
 * Description:
 *    Asynchronously issue a discard request for the sectors in question.
 *    NOTE: this variant of blk-core's blkdev_issue_discard() is a stop-gap
 *    that is being kept local to DM thinp until the block-layer changes that
 *    allow late bio splitting land upstream.
 */
static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
					sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
					struct bio *parent_bio)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors, granularity;
	int alignment;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/*
		 * Required bio_put occurs in bio_endio thanks to bio_chain below
		 */
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio_chain(bio, parent_bio);

		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		submit_bio(type, bio);

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}
	blk_finish_plug(&plug);

	return ret;
}

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
{
	return block_size_is_power_of_two(pool) ?
		(b << pool->sectors_per_block_shift) :
		(b * pool->sectors_per_block);
}

static int issue_discard(struct thin_c *tc, dm_block_t data_b, dm_block_t data_e,
			 struct bio *parent_bio)
{
	sector_t s = block_to_sectors(tc->pool, data_b);
	sector_t len = block_to_sectors(tc->pool, data_e - data_b);

	return __blkdev_issue_discard_async(tc->pool_dev->bdev, s, len,
					    GFP_NOWAIT, 0, parent_bio);
}

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_visit_release(struct pool *pool,
			       void (*fn)(void *, struct dm_bio_prison_cell *),
			       void *context,
			       struct dm_bio_prison_cell *cell)
{
	dm_cell_visit_release(pool->prison, fn, context, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error_with_code(struct pool *pool,
				 struct dm_bio_prison_cell *cell, int error_code)
{
	dm_cell_error(pool->prison, cell, error_code);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, -EIO);
}

static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, 0);
}

static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
	struct dm_bio_prison_cell *cell;
};

static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
{
	bio_list_merge(bios, master);
	bio_list_init(master);
}

static void error_bio_list(struct bio_list *bios, int error)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios)))
		bio_endio(bio, error);
}

static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, master);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, error);
}

static void requeue_deferred_cells(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell, *tmp;

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&tc->lock, flags);
	list_splice_init(&tc->deferred_cells, &cells);
	spin_unlock_irqrestore(&tc->lock, flags);

	list_for_each_entry_safe(cell, tmp, &cells, user_list)
		cell_requeue(pool, cell);
}

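/*
 * Hand all deferred bios and cells back with DM_ENDIO_REQUEUE so that
 * the IO is retried once the device is resumed.
 */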
static void requeue_io(struct thin_c *tc)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, &tc->deferred_bio_list);
	__merge_bio_list(&bios, &tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, DM_ENDIO_REQUEUE);
	requeue_deferred_cells(tc);
}

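/*
 * Fail the retry_on_resume list of every thin attached to the pool.
 */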
static void error_retry_list(struct pool *pool)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO);
	rcu_read_unlock();
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

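/*
 * Returns the block the bio's first sector falls within.
 */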
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

/*
 * Returns the _complete_ blocks that this bio covers.
 */
static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
				dm_block_t *begin, dm_block_t *end)
{
	struct pool *pool = tc->pool;
	sector_t b = bio->bi_iter.bi_sector;
	sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);

	b += pool->sectors_per_block - 1ull; /* so we round up */

	if (block_size_is_power_of_two(pool)) {
		b >>= pool->sectors_per_block_shift;
		e >>= pool->sectors_per_block_shift;
	} else {
		(void) sector_div(b, pool->sectors_per_block);
		(void) sector_div(e, pool->sectors_per_block);
	}

	if (e < b)
		/* Can happen if the bio is within a single block. */
		e = b;

	*begin = b;
	*end = e;
}

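/*
 * Remap a bio onto the pool device, preserving its offset within the block.
 */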
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

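/*
 * A FLUSH/FUA bio forces a metadata commit, but only if the current
 * transaction actually has uncommitted changes.
 */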
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

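/*
 * Register the bio with the pool's all_io deferred set so that other
 * operations can quiesce against in-flight IO.  Discard bios are
 * accounted for separately and are skipped here.
 */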
static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool pass_discard:1;
	bool maybe_shared:1;

	/*
	 * Track quiescing, copying and zeroing preparation actions.  When this
	 * counter hits zero the block is prepared and can be inserted into the
	 * btree.
	 */
	atomic_t prepare_actions;

	int err;
	struct thin_c *tc;
	dm_block_t virt_begin, virt_end;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (atomic_dec_and_test(&m->prepare_actions)) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	__complete_mapping_preparation(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

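/*
 * Callback for the kcopyd copy/zero jobs: record any error and signal
 * that this preparation step has finished.
 */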
static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_thin_new_mapping *m = context;

	m->err = read_err || write_err ? -EIO : 0;
	complete_mapping_preparation(m);
}

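/*
 * Endio for a bio that overwrote a whole block instead of copying/zeroing
 * it.  Restores the saved endio; the bio itself is completed later, in
 * process_prepared_mapping().
 */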
static void overwrite_endio(struct bio *bio, int err)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;

	bio->bi_end_io = m->saved_bi_end_io;

	m->err = err;
	complete_mapping_preparation(m);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell, except the original holder, back
 * to the deferred_bios list.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void thin_defer_bio(struct thin_c *tc, struct bio *bio);

struct remap_info {
	struct thin_c *tc;
	struct bio_list defer_bios;
	struct bio_list issue_bios;
};

static void __inc_remap_and_issue_cell(void *context,
				       struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
			bio_list_add(&info->defer_bios, bio);
		else {
			inc_all_io_entry(info->tc->pool, bio);

			/*
			 * We can't issue the bios with the bio prison lock
			 * held, so we add them to a list to issue on
			 * return from this function.
			 */
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void inc_remap_and_issue_cell(struct thin_c *tc,
				     struct dm_bio_prison_cell *cell,
				     dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	/*
	 * We have to be careful to inc any bios we're about to issue
	 * before the cell is released, and avoid a race with new bios
	 * being added to the cell.
	 */
	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(info.tc, bio, block);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio = m->bio;
	int r;

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
		bio_endio(bio, 0);
	} else {
		inc_all_io_entry(tc->pool, m->cell->holder);
		remap_and_issue(tc, m->cell->holder, m->data_block);
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
	}

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

/*----------------------------------------------------------------*/

static void free_discard_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	if (m->cell)
		cell_defer_no_holder(tc, m->cell);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	bio_io_error(m->bio);
	free_discard_mapping(m);
}

static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
{
	bio_endio(m->bio, 0);
	free_discard_mapping(m);
}

static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
	if (r) {
		metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
		bio_io_error(m->bio);
	} else
		bio_endio(m->bio, 0);

	cell_defer_no_holder(tc, m->cell);
	mempool_free(m, tc->pool->mapping_pool);
}

static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
{
	/*
	 * We've already unmapped this range of blocks, but before we
	 * passdown we have to check that these blocks are now unused.
	 */
	int r;
	bool used = true;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;

	while (b != end) {
		/* find start of unmapped run */
		for (; b < end; b++) {
			r = dm_pool_block_is_used(pool->pmd, b, &used);
			if (r)
				return r;

			if (!used)
				break;
		}

		if (b == end)
			break;

		/* find end of run */
		for (e = b + 1; e != end; e++) {
			r = dm_pool_block_is_used(pool->pmd, e, &used);
			if (r)
				return r;

			if (used)
				break;
		}

		r = issue_discard(tc, b, e, m->bio);
		if (r)
			return r;

		b = e;
	}

	return 0;
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;

	r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
	if (r)
		metadata_operation_failed(pool, "dm_thin_remove_range", r);

	else if (m->maybe_shared)
		r = passdown_double_checking_shared_status(m);
	else
		r = issue_discard(tc, m->data_block, m->data_block + (m->virt_end - m->virt_begin), m->bio);

	/*
	 * Even if r is set, there could be sub discards in flight that we
	 * need to wait for.
	 */
	bio_endio(m->bio, r);
	cell_defer_no_holder(tc, m->cell);
	mempool_free(m, pool->mapping_pool);
}

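/*
 * Splice the list of prepared mappings out from under the pool lock,
 * then run *fn on each one outside of it.
 */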
static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

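/*
 * A new mapping is preallocated here, where an allocation failure can be
 * handled cleanly, so that the later get_next_mapping() cannot fail.
 */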
static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

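/*
 * Zero a range of the data device using kcopyd; completion and errors
 * are reported through copy_complete().
 */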
static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
		    sector_t begin, sector_t end)
{
	int r;
	struct dm_io_region to;

	to.bdev = tc->pool_dev->bdev;
	to.sector = begin;
	to.count = end - begin;

	r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
	if (r < 0) {
		DMERR_LIMIT("dm_kcopyd_zero() failed");
		copy_complete(1, 1, m);
	}
}

static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
				      dm_block_t data_begin,
				      struct dm_thin_new_mapping *m)
{
	struct pool *pool = tc->pool;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->overwrite_mapping = m;
	m->bio = bio;
	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
	inc_all_io_entry(pool, bio);
	remap_and_issue(tc, bio, data_begin);
}

/*
 * A partial copy also needs to zero the uncopied region.
 */
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio,
			  sector_t len)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_begin = virt_block;
	m->virt_end = virt_block + 1u;
	m->data_block = data_dest;
	m->cell = cell;

	/*
	 * quiesce action + copy action + an extra reference held for the
	 * duration of this function (we may need to inc later for a
	 * partial zero).
	 */
	atomic_set(&m->prepare_actions, 3);

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		complete_mapping_preparation(m); /* already quiesced */

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_dest, m);
	else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = len;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = len;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			copy_complete(1, 1, m);

			/*
			 * We allow the zero to be issued, to simplify the
			 * error path.  Otherwise we'd need to start
			 * worrying about decrementing the prepare_actions
			 * counter.
			 */
		}

		/*
		 * Do we need to zero a tail region?
		 */
		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
			atomic_inc(&m->prepare_actions);
			ll_zero(tc, m,
				data_dest * pool->sectors_per_block + len,
				(data_dest + 1) * pool->sectors_per_block);
		}
	}

	complete_mapping_preparation(m); /* drop our ref */
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio,
		      tc->pool->sectors_per_block);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
Mike Snitzer's avatar
Mike Snitzer committed
1304
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
	m->tc = tc;
	m->virt_begin = virt_block;
	m->virt_end = virt_block + 1u;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (pool->pf.zero_new_blocks) {
		if (io_overwrites_block(pool, bio))
			remap_and_issue_overwrite(tc, bio, data_block, m);
		else
			ll_zero(tc, m, data_block * pool->sectors_per_block,
				(data_block + 1) * pool->sectors_per_block);
	} else
		process_prepared_mapping(m);
}

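/*
 * Like schedule_internal_copy(), but the source is the external origin
 * device.  Only the part of the block that lies below origin_size can be
 * copied; a block entirely beyond the end of the origin is zeroed instead.
 */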
static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

	if (virt_block_end <= tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      pool->sectors_per_block);

	else if (virt_block_begin < tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      tc->origin_size - virt_block_begin);

	else
		schedule_zero(tc, virt_block, data_dest, cell, bio);
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

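/*
 * If the pool ran out of data space, a commit may have freed some blocks
 * (e.g. via discards); if so, move the pool back to write mode.
 */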
static void check_for_space(struct pool *pool)
{
	int r;
	dm_block_t nr_free;

	if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
		return;

	r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
	if (r)
		return;

	if (nr_free)
		set_pool_mode(pool, PM_WRITE);
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
	else
		check_for_space(pool);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static int alloc_data_block(struct thin_c *tc, dm_block_t