/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
8
#include "dm-bio-prison.h"
9
#include "dm.h"
10
11
12
13
14

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
15
#include <linux/rculist.h>
16
17
18
19
20
21
22
23
24
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#define	DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think the data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes, ordered here from least to most degraded so
 * that the modes can be compared.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	bool requeue_mode:1;
	spinlock_t lock;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_defer_no_holder_no_free(struct thin_c *tc,
					 struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void cell_error(struct pool *pool,
		       struct dm_bio_prison_cell *cell)
{
	dm_cell_error(pool->prison, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
};

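/*
 * Complete every bio on @master with DM_ENDIO_REQUEUE.
 */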
static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, master);
	bio_list_init(master);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

static void requeue_io(struct thin_c *tc)
{
	requeue_bio_list(tc, &tc->deferred_bio_list);
	requeue_bio_list(tc, &tc->retry_on_resume_list);
}

static void error_thin_retry_list(struct thin_c *tc)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, &tc->retry_on_resume_list);
	bio_list_init(&tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}

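/*
 * Error the retry_on_resume list of every thin device attached to the pool.
 */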
static void error_retry_list(struct pool *pool)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_retry_list(tc);
	rcu_read_unlock();
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

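/*
 * Map a bio's starting sector to the corresponding block number within
 * the thin device.
 */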
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

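/*
 * Remap a bio to the pool device, placing it at the correct offset within
 * the given data block.
 */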
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

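/*
 * A FLUSH/FUA bio only needs a metadata commit if the current transaction
 * actually has changes.
 */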
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

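/*
 * Track this bio in the pool's all_io deferred set.  Discard bios are not
 * tracked here.
 */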
static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

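/*
 * Submit a bio directly unless it would trigger a metadata commit, in
 * which case it is deferred so the worker can batch the commit.
 */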
static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool quiesced:1;
	bool prepared:1;
	bool pass_discard:1;
	bool definitely_not_shared:1;

	int err;
	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (m->quiesced && m->prepared) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_thin_new_mapping *m = context;
	struct pool *pool = m->tc->pool;

	m->err = read_err || write_err ? -EIO : 0;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void overwrite_endio(struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;
	struct pool *pool = m->tc->pool;

	m->err = err;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

/*
 * Same as cell_defer above, except it omits the original holder of the cell.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio) {
		m->bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&m->bio->bi_remaining);
	}
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio) {
		bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&bio->bi_remaining);
	}

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		cell_defer_no_holder(tc, m->cell);
		bio_endio(bio, 0);
	} else
		cell_defer(tc, m->cell);

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		if (m->definitely_not_shared)
			remap_and_issue(tc, m->bio, m->data_block);
		else {
			bool used = false;
			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
				bio_endio(m->bio, 0);
			else
				remap_and_issue(tc, m->bio, m->data_block);
		}
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

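/*
 * Grab the given list of prepared mappings under the pool lock and run
 * *fn on each entry.
 */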
static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

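/*
 * Pre-allocate pool->next_mapping so that a subsequent get_next_mapping()
 * cannot fail.
 */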
static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		m->quiesced = true;

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_dest);
	} else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = pool->sectors_per_block;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			cell_error(pool, cell);
		}
	}
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->origin_dev,
		      virt_block, data_dest, cell, bio);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->quiesced = true;
	m->prepared = false;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_block);
	} else {
		int r;
		struct dm_io_region to;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_block * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_zero() failed");
			cell_error(pool, cell);
		}
	}
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) != PM_WRITE)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

	return r;
}

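/*
 * Send a dm event the first time free space drops to or below the pool's
 * low water mark.
 */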
static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

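/*
 * Allocate a new data block, committing outstanding metadata first if that
 * might free up space.  Puts the pool into out-of-data-space mode and
 * returns -ENOSPC if no blocks are available.
 */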
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

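/*
 * Decide whether a bio that can't currently be serviced should be errored
 * immediately or retried when the pool is resumed.
 */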
static bool should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return true;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space;

	case PM_READ_ONLY:
	case PM_FAIL:
		return true;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return true;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	if (should_error_unserviceable_bio(pool))
		bio_io_error(bio);
	else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;

	if (should_error_unserviceable_bio(pool)) {
		cell_error(pool, cell);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	if (should_error_unserviceable_bio(pool))
		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);
	else
		while ((bio = bio_list_pop(&bios)))
			retry_on_resume(bio);
}

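/*
 * Handle a discard bio: quiesce I/O to the mapped data block, remove the
 * mapping and, if configured, pass the discard down to the data device.
 */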
static void process_discard(struct thin_c *tc, struct bio *bio)
{
	int r;
	unsigned long flags;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell, *cell2;
	struct dm_cell_key key, key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block.  This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (bio_detain(tc->pool, &key2, bio, &cell2)) {
			cell_defer_no_holder(tc, cell);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block.  We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = pool->pf.discard_passdown;
			m->definitely_not_shared = !lookup_result.shared;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
				spin_lock_irqsave(&pool->lock, flags);
				list_add_tail(&m->list, &pool->prepared_discards);
				spin_unlock_irqrestore(&pool->lock, flags);
				wake_worker(pool);
			}
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell2);

			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary.  So we submit the discard of a
			 * partial block appropriately.
			 */
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

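/*
 * Break sharing of a data block by allocating a private copy for the
 * writing device.
 */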
static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

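/*
 * Handle I/O to a shared data block: writes break sharing, reads are
 * simply remapped to the shared block.
 */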
static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result)
{
	struct dm_bio_prison_cell *cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
		break_sharing(tc, bio, block, &key, lookup_result, cell);
	else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, lookup_result->block);
	}
}

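/*
 * Handle I/O to an unprovisioned block: flushes and reads complete
 * immediately, writes allocate a new data block.
 */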
static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_iter.bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);