/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rbtree.h>

#define	DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * skipped if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, i.e. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think the data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes.  Ordered from least to most degraded so the
 * modes can be compared.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

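	/* Time of the last metadata commit, used to drive the periodic commit. */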
	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
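	/* When set, bios are requeued instead of being mapped. */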
	bool requeue_mode:1;
	spinlock_t lock;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

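/*
 * Try to take ownership of the region covered by @key.  Returns zero if
 * we inserted a new cell and became the holder, non-zero if the bio was
 * queued in an already-occupied cell.
 */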
static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

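/*
 * Like cell_release_no_holder(), but the bios are deferred to the
 * thin's deferred_bio_list and freeing the cell is left to the caller.
 */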
static void cell_defer_no_holder_no_free(struct thin_c *tc,
					 struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void cell_error(struct pool *pool,
		       struct dm_bio_prison_cell *cell)
{
	dm_cell_error(pool->prison, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
};

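/*
 * Pop every bio off @master (under the thin's lock) and complete each
 * one with DM_ENDIO_REQUEUE.
 */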
static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, master);
	bio_list_init(master);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

static void requeue_io(struct thin_c *tc)
{
	requeue_bio_list(tc, &tc->deferred_bio_list);
	requeue_bio_list(tc, &tc->retry_on_resume_list);
}

static void error_thin_retry_list(struct thin_c *tc)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, &tc->retry_on_resume_list);
	bio_list_init(&tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}

static void error_retry_list(struct pool *pool)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_retry_list(tc);
	rcu_read_unlock();
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

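/* Map a bio's sector to the virtual block it falls within. */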
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

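/*
 * A FLUSH/FUA bio must not complete before the metadata changes it
 * depends on have been committed.
 */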
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

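/*
 * Track the bio in the all_io deferred set so discards can quiesce
 * against it.  Discard bios themselves are not tracked.
 */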
static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

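	/*
	 * quiesced: no IO remains outstanding against the old data block.
	 * prepared: the copy/zero of the new block (or the overwrite) has
	 * completed.  The mapping is committed only once both are true.
	 */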
	bool quiesced:1;
	bool prepared:1;
	bool pass_discard:1;
	bool definitely_not_shared:1;

	int err;
	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

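/*
 * Queue a mapping for the worker once it is both quiesced (no IO
 * outstanding against the old block) and prepared (copy/zero complete).
 */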
static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (m->quiesced && m->prepared) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_thin_new_mapping *m = context;
	struct pool *pool = m->tc->pool;

	m->err = read_err || write_err ? -EIO : 0;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void overwrite_endio(struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;
	struct pool *pool = m->tc->pool;

	m->err = err;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the thin's deferred_bio_list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

/*
 * Same as cell_defer above, except it omits the original holder of the cell.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

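/*
 * The copy or zero failed: error all bios waiting in the cell and throw
 * the mapping away.
 */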
static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio) {
		m->bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&m->bio->bi_remaining);
	}
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio) {
		bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&bio->bi_remaining);
	}

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		cell_defer_no_holder(tc, m->cell);
		bio_endio(bio, 0);
	} else
		cell_defer(tc, m->cell);

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		if (m->definitely_not_shared)
			remap_and_issue(tc, m->bio, m->data_block);
		else {
			bool used = false;
			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
				bio_endio(m->bio, 0);
			else
				remap_and_issue(tc, m->bio, m->data_block);
		}
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

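/*
 * Splice @head out from under the pool lock, then run @fn on each
 * mapping outside the lock.
 */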
static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

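/*
 * Claim the mapping reserved by an earlier, successful call to
 * ensure_next_mapping().
 */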
static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		m->quiesced = true;

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_dest);
	} else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = pool->sectors_per_block;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			cell_error(pool, cell);
		}
	}
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->origin_dev,
		      virt_block, data_dest, cell, bio);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->quiesced = true;
	m->prepared = false;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_block);
	} else {
		int r;
		struct dm_io_region to;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_block * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_zero() failed");
			cell_error(pool, cell);
		}
	}
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

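/*
 * Allocate a new data block.  If the pool looks full, commit first in
 * case that frees some blocks; if it is genuinely full, switch the pool
 * to PM_OUT_OF_DATA_SPACE mode.
 */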
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

static bool should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return true;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space;

	case PM_READ_ONLY:
	case PM_FAIL:
		return true;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return true;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	if (should_error_unserviceable_bio(pool))
		bio_io_error(bio);
	else
		retry_on_resume(bio);
}

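/*
 * Release every bio held in @cell, erroring each or queueing it for
 * retry on resume depending on the pool mode.
 */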
static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;

	if (should_error_unserviceable_bio(pool)) {
		cell_error(pool, cell);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	if (should_error_unserviceable_bio(pool))
		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);
	else
		while ((bio = bio_list_pop(&bios)))
			retry_on_resume(bio);
}

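/*
 * A whole-block discard quiesces outstanding IO and then removes the
 * mapping; a partial-block discard is passed straight down to the data
 * device (when unshared and passdown is enabled) without touching the
 * mapping.
 */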
static void process_discard(struct thin_c *tc, struct bio *bio)
{
	int r;
	unsigned long flags;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell, *cell2;
	struct dm_cell_key key, key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block.  This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (bio_detain(tc->pool, &key2, bio, &cell2)) {
			cell_defer_no_holder(tc, cell);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block.  We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = pool->pf.discard_passdown;
			m->definitely_not_shared = !lookup_result.shared;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
				spin_lock_irqsave(&pool->lock, flags);
				list_add_tail(&m->list, &pool->prepared_discards);
				spin_unlock_irqrestore(&pool->lock, flags);
				wake_worker(pool);
			}
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell2);

			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary, so a partial-block discard can be
			 * submitted directly to the data device.
			 */
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

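/*
 * Allocate a fresh data block and schedule a copy of the shared block
 * into it, breaking the share.
 */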
static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result)
{
	struct dm_bio_prison_cell *cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
		break_sharing(tc, bio, block, &key, lookup_result, cell);
	else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_iter.bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0: