/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rbtree.h>

#define	DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT (HZ * 60)

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
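/* i.e. 128 to 2097152 sectors, given 512-byte sectors (SECTOR_SHIFT == 9). */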

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, i.e. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is that the timestamp magic isn't perfect,
 * and will continue to think that a data block in the snapshot device is
 * shared even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */
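
/*
 * For example: a write to virtual block V of the origin, whose mapping
 * points at shared data block D, detains further bios against D (i),
 * quiesces in-flight reads to D (ii) while D is copied to a freshly
 * allocated block D' (iii), inserts the V -> D' mapping into the
 * origin's btree (iv), and finally releases the detained bios, which
 * now remap to D' (v).
 */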

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}
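
/*
 * Both kinds of key are detained in the same pool->prison (see
 * bio_detain() below); the virtual flag keeps a device's virtual-block
 * keys distinct from its data-block keys.
 */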

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes.  Ordered by increasing degradation, so the
 * modes can be compared.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};
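
/*
 * Because the modes are ordered, code below can use range checks such
 * as "get_pool_mode(pool) >= PM_READ_ONLY" (see commit()) instead of
 * testing each mode individually.
 */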

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	bool requeue_mode:1;
	spinlock_t lock;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_defer_no_holder_no_free(struct thin_c *tc,
					 struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void cell_error(struct pool *pool,
		       struct dm_bio_prison_cell *cell)
{
	dm_cell_error(pool->prison, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
};

static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, master);
	bio_list_init(master);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

static void requeue_io(struct thin_c *tc)
{
	requeue_bio_list(tc, &tc->deferred_bio_list);
	requeue_bio_list(tc, &tc->retry_on_resume_list);
}

static void error_thin_retry_list(struct thin_c *tc)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, &tc->retry_on_resume_list);
	bio_list_init(&tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}

static void error_retry_list(struct pool *pool)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_retry_list(tc);
	rcu_read_unlock();
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}
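
/*
 * Illustrative example (not from the original source): with a 64KB
 * block size, sectors_per_block == 128 and sectors_per_block_shift == 7,
 * so get_bio_block() maps a bio at sector 300 to virtual block
 * 300 >> 7 == 2.  If that virtual block maps to data block 5, remap()
 * rewrites the sector to (5 << 7) | (300 & 127) == 684.
 */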

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool quiesced:1;
	bool prepared:1;
	bool pass_discard:1;
	bool definitely_not_shared:1;

	int err;
	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};
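
/*
 * A mapping only moves onto the pool's prepared_mappings list once it
 * is both quiesced (no reads remain in flight to the old block; see the
 * shared_read_ds deferred set) and prepared (the copy, zero or
 * overwrite has completed; see copy_complete() and overwrite_endio()).
 * __maybe_add_mapping() below checks both flags.
 */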

static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (m->quiesced && m->prepared) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_thin_new_mapping *m = context;
	struct pool *pool = m->tc->pool;

	m->err = read_err || write_err ? -EIO : 0;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void overwrite_endio(struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;
	struct pool *pool = m->tc->pool;

	m->err = err;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

/*
 * Same as cell_defer above, except it omits the original holder of the cell.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio) {
		m->bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&m->bio->bi_remaining);
	}
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio) {
		bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&bio->bi_remaining);
	}

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		cell_defer_no_holder(tc, m->cell);
		bio_endio(bio, 0);
	} else
		cell_defer(tc, m->cell);

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		if (m->definitely_not_shared)
			remap_and_issue(tc, m->bio, m->data_block);
		else {
			bool used = false;
			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
				bio_endio(m->bio, 0);
			else
				remap_and_issue(tc, m->bio, m->data_block);
		}
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}
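
/*
 * schedule_copy() and schedule_zero() below use io_overwrites_block()
 * to skip the kcopyd copy/zero when an incoming write already covers
 * the whole block: the bio itself supplies the block's new contents,
 * and overwrite_endio() marks the mapping prepared.
 */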

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		m->quiesced = true;

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_dest);
	} else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = pool->sectors_per_block;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			cell_error(pool, cell);
		}
	}
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->origin_dev,
		      virt_block, data_dest, cell, bio);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->quiesced = true;
	m->prepared = false;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_block);
	} else {
		int r;
		struct dm_io_region to;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_block * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_zero() failed");
			cell_error(pool, cell);
		}
	}
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

static bool should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return true;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space;

	case PM_READ_ONLY:
	case PM_FAIL:
		return true;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return true;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	if (should_error_unserviceable_bio(pool))
		bio_io_error(bio);
	else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;

	if (should_error_unserviceable_bio(pool)) {
		cell_error(pool, cell);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	if (should_error_unserviceable_bio(pool))
		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);
	else
		while ((bio = bio_list_pop(&bios)))
			retry_on_resume(bio);
}

static void process_discard(struct thin_c *tc, struct bio *bio)
{
	int r;
	unsigned long flags;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell, *cell2;
	struct dm_cell_key key, key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block.  This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (bio_detain(tc->pool, &key2, bio, &cell2)) {
			cell_defer_no_holder(tc, cell);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block.  We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = pool->pf.discard_passdown;
			m->definitely_not_shared = !lookup_result.shared;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
				spin_lock_irqsave(&pool->lock, flags);
				list_add_tail(&m->list, &pool->prepared_discards);
				spin_unlock_irqrestore(&pool->lock, flags);
				wake_worker(pool);
			}
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell2);

			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary.  So we submit the discard of a
			 * partial block appropriately.
			 */
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result)
{
	struct dm_bio_prison_cell *cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
		break_sharing(tc, bio, block, &key, lookup_result, cell);
	else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_iter.bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);