/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rbtree.h>

#define	DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * skipped if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is that the timestamp magic isn't perfect, and
 * will continue to think that a data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

/*----------------------------------------------------------------*/

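/*
 * Worker throttling: throttle_work_start() marks the beginning of a work
 * pass.  If throttle_work_update() later sees that the pass has been running
 * for longer than THROTTLE_THRESHOLD it takes the semaphore for write, which
 * stalls any path that takes it for read via throttle_lock() until
 * throttle_work_complete() releases it at the end of the pass.
 */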
#define THROTTLE_THRESHOLD (1 * HZ)

struct throttle {
	struct rw_semaphore lock;
	unsigned long threshold;
	bool throttle_applied;
};

static void throttle_init(struct throttle *t)
{
	init_rwsem(&t->lock);
	t->throttle_applied = false;
}

static void throttle_work_start(struct throttle *t)
{
	t->threshold = jiffies + THROTTLE_THRESHOLD;
}

static void throttle_work_update(struct throttle *t)
{
	if (!t->throttle_applied && jiffies > t->threshold) {
		down_write(&t->lock);
		t->throttle_applied = true;
	}
}

static void throttle_work_complete(struct throttle *t)
{
	if (t->throttle_applied) {
		t->throttle_applied = false;
		up_write(&t->lock);
	}
}

static void throttle_lock(struct throttle *t)
{
	down_read(&t->lock);
}

static void throttle_unlock(struct throttle *t)
{
	up_read(&t->lock);
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes, ordered here from least to most degraded so
 * that the modes can be compared directly.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct throttle throttle;
	struct work_struct worker;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	sector_t origin_size;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	bool requeue_mode:1;
	spinlock_t lock;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

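/*
 * Like cell_defer_no_holder() below, but the prison cell itself is not
 * freed: the held bios are moved onto the thin device's deferred list and
 * the worker is woken.
 */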
static void cell_defer_no_holder_no_free(struct thin_c *tc,
					 struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void cell_error_with_code(struct pool *pool,
				 struct dm_bio_prison_cell *cell, int error_code)
{
	dm_cell_error(pool->prison, cell, error_code);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, -EIO);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

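/*
 * Per-bio private data: the owning thin device, any deferred-set entries
 * taken for the bio, the mapping used when a bio overwrites a whole block
 * being provisioned, and a node for the sorted deferred-bio tree.
 */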
struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
};

static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, master);
	bio_list_init(master);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

static void requeue_io(struct thin_c *tc)
{
	requeue_bio_list(tc, &tc->deferred_bio_list);
	requeue_bio_list(tc, &tc->retry_on_resume_list);
}

static void error_thin_retry_list(struct thin_c *tc)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, &tc->retry_on_resume_list);
	bio_list_init(&tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}

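/*
 * Error the bios parked on the retry_on_resume list of every active thin
 * device.
 */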
static void error_retry_list(struct pool *pool)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_retry_list(tc);
	rcu_read_unlock();
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete the bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool pass_discard:1;
	bool definitely_not_shared:1;

	/*
	 * Track quiescing, copying and zeroing preparation actions.  When this
	 * counter hits zero the block is prepared and can be inserted into the
	 * btree.
	 */
	atomic_t prepare_actions;

	int err;
	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (atomic_dec_and_test(&m->prepare_actions)) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	__complete_mapping_preparation(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_thin_new_mapping *m = context;

	m->err = read_err || write_err ? -EIO : 0;
	complete_mapping_preparation(m);
}

static void overwrite_endio(struct bio *bio, int err)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;

	m->err = err;
	complete_mapping_preparation(m);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

/*
 * Same as cell_defer above, except it omits the original holder of the cell.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio) {
		m->bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&m->bio->bi_remaining);
	}
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio) {
		bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&bio->bi_remaining);
	}

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		cell_defer_no_holder(tc, m->cell);
		bio_endio(bio, 0);
	} else
		cell_defer(tc, m->cell);

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		if (m->definitely_not_shared)
			remap_and_issue(tc, m->bio, m->data_block);
		else {
			bool used = false;
			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
				bio_endio(m->bio, 0);
			else
				remap_and_issue(tc, m->bio, m->data_block);
		}
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

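/*
 * Grab the list of prepared mappings (or discards) under the pool lock and
 * run the given handler on each entry.
 */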
static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

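/*
 * Consume the mapping preallocated by ensure_next_mapping().  The caller
 * must have called ensure_next_mapping() successfully beforehand, hence the
 * BUG_ON.
 */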
static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
		    sector_t begin, sector_t end)
{
	int r;
	struct dm_io_region to;

	to.bdev = tc->pool_dev->bdev;
	to.sector = begin;
	to.count = end - begin;

	r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
	if (r < 0) {
		DMERR_LIMIT("dm_kcopyd_zero() failed");
		copy_complete(1, 1, m);
	}
}

/*
 * A partial copy also needs to zero the uncopied region.
 */
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio,
			  sector_t len)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;

	/*
	 * quiesce action + copy action + an extra reference held for the
	 * duration of this function (we may need to inc later for a
	 * partial zero).
	 */
	atomic_set(&m->prepare_actions, 3);

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		complete_mapping_preparation(m); /* already quiesced */

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_dest);
	} else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = len;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = len;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			copy_complete(1, 1, m);

			/*
			 * We allow the zero to be issued, to simplify the
			 * error path.  Otherwise we'd need to start
			 * worrying about decrementing the prepare_actions
			 * counter.
			 */
		}

		/*
		 * Do we need to zero a tail region?
		 */
		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
			atomic_inc(&m->prepare_actions);
			ll_zero(tc, m,
				data_dest * pool->sectors_per_block + len,
				(data_dest + 1) * pool->sectors_per_block);
		}
	}

	complete_mapping_preparation(m); /* drop our ref */
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio,
		      tc->pool->sectors_per_block);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_block);

	} else
		ll_zero(tc, m,
			data_block * pool->sectors_per_block,
			(data_block + 1) * pool->sectors_per_block);
}

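/*
 * Provision data_dest for virt_block on a thin device that has an external
 * origin: copy whatever part of the block lies within the origin and zero
 * the remainder; a block entirely beyond the end of the origin is simply
 * zeroed.
 */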
static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

	if (virt_block_end <= tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      pool->sectors_per_block);

	else if (virt_block_begin < tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      tc->origin_size - virt_block_begin);

	else
		schedule_zero(tc, virt_block, data_dest, cell, bio);
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

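/*
 * Allocate a new data block for the thin device, returning it in *result.
 * If the pool looks full, try a commit first (which may release blocks
 * freed in the current transaction); if that doesn't help, switch the pool
 * to out-of-data-space mode and return -ENOSPC.
 */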
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

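/*
 * Decide how a bio the pool cannot currently service should be handled:
 * returns 0 if it should be queued for retry on resume, otherwise the
 * error code to complete it with.
 */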
static int should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return -EIO;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space ? -ENOSPC : 0;

	case PM_READ_ONLY:
	case PM_FAIL:
		return -EIO;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return -EIO;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	int error = should_error_unserviceable_bio(pool);

	if (error)
		bio_endio(bio, error);
	else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;
	int error;

	error = should_error_unserviceable_bio(pool);
	if (error) {
		cell_error_with_code(pool, cell, error);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	error = should_error_unserviceable_bio(pool);
	if (error)
		while ((bio = bio_list_pop(&bios)))
			bio_endio(bio, error);
	else
		while ((bio = bio_list_pop(&bios)))
			retry_on_resume(bio);
}

static void process_discard(struct thin_c *tc, struct bio *bio)
{
	int r;
	unsigned long flags;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell, *cell2;
	struct dm_cell_key key, key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block.  This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (bio_detain(tc->pool, &key2, bio, &cell2)) {
			cell_defer_no_holder(tc, cell);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block.  We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = pool->pf.discard_passdown;
			m->definitely_not_shared = !lookup_result.shared;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
				spin_lock_irqsave(&pool->lock, flags);
				list_add_tail(&m->list, &pool->prepared_discards);
				spin_unlock_irqrestore(&pool->lock, flags);
				wake_worker(pool);
			}
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell2);

			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary.  So we submit the discard of a
			 * partial block appropriately.
			 */
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);