/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#define	DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
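
/*
 * With 512 byte sectors (SECTOR_SHIFT == 9) these limits work out to
 * 128 sectors and 2097152 sectors respectively.
 */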

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * skipped if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
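/*
 * Two key spaces are used: a virtual key (virtual = 1) identifies a
 * block within a thin device's address space, while a data key
 * (virtual = 0) identifies a block on the pool's data device.  Cells
 * built on these keys serialize concurrent IO to the same block.
 */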
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes.  Ordered from least to most degraded, so
 * that modes can be compared.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;

	struct bio_list retry_on_resume_list;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	bool requeue_mode:1;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

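/*
 * Releases the non-holder bios in the cell to the pool's deferred list,
 * but does not free the cell itself: the caller retains ownership of
 * the (pre-allocated) cell.
 */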
static void cell_defer_no_holder_no_free(struct thin_c *tc,
					 struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

static void cell_error(struct pool *pool,
		       struct dm_bio_prison_cell *cell)
{
	dm_cell_error(pool->prison, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
};

static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->pool->lock, flags);
	bio_list_merge(&bios, master);
	bio_list_init(master);
	spin_unlock_irqrestore(&tc->pool->lock, flags);

	while ((bio = bio_list_pop(&bios))) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		if (h->tc == tc)
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_list_add(master, bio);
	}
}

static void requeue_io(struct thin_c *tc)
{
	struct pool *pool = tc->pool;

	requeue_bio_list(tc, &pool->deferred_bios);
	requeue_bio_list(tc, &pool->retry_on_resume_list);
}

static void error_retry_list(struct pool *pool)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_merge(&bios, &pool->retry_on_resume_list);
	bio_list_init(&pool->retry_on_resume_list);
	spin_unlock_irqrestore(&pool->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}
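
/*
 * When the block size is a power of two, get_bio_block() and remap()
 * reduce to a shift and a mask: with 64KB blocks (128 sectors,
 * sectors_per_block_shift == 7), block = sector >> 7 and the offset
 * within the block is sector & 127.  The helper below is a purely
 * illustrative sketch (it is not used by the driver) showing the
 * inverse mapping for that power-of-two case.
 */
static inline sector_t __example_block_to_sector(struct pool *pool,
						 dm_block_t block,
						 sector_t offset_in_block)
{
	/* data device sector = scaled-up block number | offset in block */
	return ((sector_t) block << pool->sectors_per_block_shift) |
		offset_in_block;
}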

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

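/*
 * Only FLUSH/FUA bios need to wait for the metadata to be committed,
 * and then only if the current transaction has actually changed it.
 */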
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

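/*
 * Track this bio in the all_io deferred set so discards can quiesce
 * against it.  Discard bios are skipped here; they join the set via
 * dm_deferred_set_add_work() in process_discard() instead.
 */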
static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
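
/*
 * A dm_thin_new_mapping tracks a block that is being provisioned,
 * copied or zeroed.  It is only committed to the metadata btree once
 * both quiesced (older reads have drained) and prepared (the copy or
 * zero has completed) are set; see __maybe_add_mapping().
 */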
struct dm_thin_new_mapping {
	struct list_head list;

	bool quiesced:1;
	bool prepared:1;
	bool pass_discard:1;
	bool definitely_not_shared:1;

	int err;
	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (m->quiesced && m->prepared) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_thin_new_mapping *m = context;
	struct pool *pool = m->tc->pool;

	m->err = read_err || write_err ? -EIO : 0;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void overwrite_endio(struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;
	struct pool *pool = m->tc->pool;

	m->err = err;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	cell_release(pool, cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

/*
 * Same as cell_defer above, except it omits the original holder of the cell.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	cell_release_no_holder(pool, cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio) {
		m->bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&m->bio->bi_remaining);
	}
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio) {
		bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&bio->bi_remaining);
	}

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		cell_defer_no_holder(tc, m->cell);
		bio_endio(bio, 0);
	} else
		cell_defer(tc, m->cell);

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		if (m->definitely_not_shared)
			remap_and_issue(tc, m->bio, m->data_block);
		else {
			bool used = false;
			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
				bio_endio(m->bio, 0);
			else
				remap_and_issue(tc, m->bio, m->data_block);
		}
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
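
/*
 * A bio "overlaps" a block when it is exactly one pool block in size,
 * and "overwrites" the block when it is also a write.  Such writes let
 * schedule_copy() and schedule_zero() skip the copy or zero step and
 * issue the bio directly.
 */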
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

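/*
 * One mapping is kept pre-allocated per pool.  ensure_next_mapping()
 * may fail with -ENOMEM (the mempool is tapped with GFP_ATOMIC), in
 * which case the caller can defer the work and retry later;
 * get_next_mapping() can therefore insist that next_mapping is
 * populated.
 */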
static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		m->quiesced = true;

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_dest);
	} else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = pool->sectors_per_block;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			cell_error(pool, cell);
		}
	}
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->origin_dev,
		      virt_block, data_dest, cell, bio);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->quiesced = true;
	m->prepared = false;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_block);
	} else {
		int r;
		struct dm_io_region to;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_block * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_zero() failed");
			cell_error(pool, cell);
		}
	}
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) != PM_WRITE)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

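/*
 * Allocate a new data block.  If the free count has reached zero we
 * try a commit first, since blocks freed in the current transaction
 * only become allocatable after a commit.  If there is still no space
 * the pool is switched to PM_OUT_OF_DATA_SPACE mode.
 */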
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static bool should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return true;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space;

	case PM_READ_ONLY:
	case PM_FAIL:
		return true;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return true;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	if (should_error_unserviceable_bio(pool))
		bio_io_error(bio);
	else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;

	if (should_error_unserviceable_bio(pool)) {
		cell_error(pool, cell);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	if (should_error_unserviceable_bio(pool))
		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);
	else
		while ((bio = bio_list_pop(&bios)))
			retry_on_resume(bio);
}

static void process_discard(struct thin_c *tc, struct bio *bio)
{
	int r;
	unsigned long flags;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell, *cell2;
	struct dm_cell_key key, key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block.  This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (bio_detain(tc->pool, &key2, bio, &cell2)) {
			cell_defer_no_holder(tc, cell);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block.  We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = pool->pf.discard_passdown;
			m->definitely_not_shared = !lookup_result.shared;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
				spin_lock_irqsave(&pool->lock, flags);
				list_add_tail(&m->list, &pool->prepared_discards);
				spin_unlock_irqrestore(&pool->lock, flags);
				wake_worker(pool);
			}
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell2);

			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary.  So we submit the discard of a
			 * partial block appropriately.
			 */
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result)
{
	struct dm_bio_prison_cell *cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
		break_sharing(tc, bio, block, &key, lookup_result, cell);
	else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_iter.bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_bio(struct thin_c *tc, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;
	struct dm_thin_lookup_result lookup_result;

	/*
	 * If cell is already occupied, then the block is already
	 * being provisioned so we have nothing further to do here.
	 */
	build_virtual_key(tc->td, block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (