/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rbtree.h>

#define	DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug further io to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that the data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

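/*
 * For reference, the table lines accepted by the pool and thin targets
 * are roughly of the following form (see
 * Documentation/device-mapper/thin-provisioning.txt for the authoritative
 * syntax):
 *
 *   thin-pool <metadata dev> <data dev> <data block size (sectors)>
 *             <low water mark (blocks)> [<#feature args> [<arg>]*]
 *
 *   thin <pool dev> <dev id> [<external origin dev>]
 */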
/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes, ordered by increasing degradation so that
 * modes can be compared numerically.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	bool requeue_mode:1;
	spinlock_t lock;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

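/*
 * bio_detain() tries to take the lock described by @key on behalf of @bio.
 * Returns 0 if @bio is now the holder of a freshly inserted cell, non-zero
 * if a cell for this key already existed (in which case @bio has been
 * queued on that existing cell).
 */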
static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

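/*
 * Hand every bio held in @cell (including the holder) to @bios and return
 * the cell to the prison's mempool.
 */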
static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

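/*
 * As cell_release_no_holder(), but the released bios go onto the thin
 * device's deferred list for the worker and the cell itself is left for
 * the caller to free.
 */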
static void cell_defer_no_holder_no_free(struct thin_c *tc,
					 struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

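/*
 * Error every bio held in @cell and return the cell to the prison's mempool.
 */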
static void cell_error(struct pool *pool,
		       struct dm_bio_prison_cell *cell)
{
	dm_cell_error(pool->prison, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
};

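/*
 * Atomically take ownership of the bios on @master and complete each one
 * with DM_ENDIO_REQUEUE so the DM core requeues it.
 */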
static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, master);
	bio_list_init(master);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

static void requeue_io(struct thin_c *tc)
{
	requeue_bio_list(tc, &tc->deferred_bio_list);
	requeue_bio_list(tc, &tc->retry_on_resume_list);
}

static void error_thin_retry_list(struct thin_c *tc)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, &tc->retry_on_resume_list);
	bio_list_init(&tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}

static void error_retry_list(struct pool *pool)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_retry_list(tc);
	rcu_read_unlock();
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

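/*
 * Return the block number containing the bio's start sector, i.e.
 * bi_sector divided by the pool's block size.
 */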
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

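/*
 * Rewrite the bio so it targets data block @block on the pool's data
 * device, preserving the bio's offset within the block.
 */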
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

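/*
 * Send the bio straight down unless it is a FLUSH/FUA that needs a
 * metadata commit first; such bios are batched on deferred_flush_bios
 * and issued by the worker after a single commit.
 */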
static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool quiesced:1;
	bool prepared:1;
	bool pass_discard:1;
	bool definitely_not_shared:1;

	int err;
	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

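/*
 * Queue a mapping for the worker once it has both quiesced and been
 * prepared; callers are expected to hold pool->lock.
 */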
static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (m->quiesced && m->prepared) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_thin_new_mapping *m = context;
	struct pool *pool = m->tc->pool;

	m->err = read_err || write_err ? -EIO : 0;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void overwrite_endio(struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;
	struct pool *pool = m->tc->pool;

	m->err = err;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

/*
 * Same as cell_defer above, except it omits the original holder of the cell.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio) {
		m->bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&m->bio->bi_remaining);
	}
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio) {
		bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&bio->bi_remaining);
	}

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		cell_defer_no_holder(tc, m->cell);
		bio_endio(bio, 0);
	} else
		cell_defer(tc, m->cell);

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		if (m->definitely_not_shared)
			remap_and_issue(tc, m->bio, m->data_block);
		else {
			bool used = false;
			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
				bio_endio(m->bio, 0);
			else
				remap_and_issue(tc, m->bio, m->data_block);
		}
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

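/*
 * Set up a dm_thin_new_mapping that copies block @data_origin on @origin
 * to @data_dest on the pool's data device.  If @bio overwrites the whole
 * block the copy is skipped and the bio is remapped directly; otherwise
 * kcopyd performs the copy and signals completion via copy_complete().
 */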
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		m->quiesced = true;

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_dest);
	} else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = pool->sectors_per_block;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			cell_error(pool, cell);
		}
	}
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->origin_dev,
		      virt_block, data_dest, cell, bio);
}

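/*
 * As schedule_copy(), but the new block is zeroed rather than copied.
 * The zero is skipped when zero_new_blocks is disabled or the bio
 * overwrites the whole block.
 */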
static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->quiesced = true;
	m->prepared = false;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_block);
	} else {
		int r;
		struct dm_io_region to;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_block * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_zero() failed");
			cell_error(pool, cell);
		}
	}
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

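/*
 * Allocate a free data block for @tc, committing outstanding metadata
 * first if the free count has dropped to zero.  Switches the pool to
 * PM_OUT_OF_DATA_SPACE and returns -ENOSPC when no space remains.
 */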
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

static bool should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return true;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space;

	case PM_READ_ONLY:
	case PM_FAIL:
		return true;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return true;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	if (should_error_unserviceable_bio(pool))
		bio_io_error(bio);
	else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;

	if (should_error_unserviceable_bio(pool)) {
		cell_error(pool, cell);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	if (should_error_unserviceable_bio(pool))
		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);
	else
		while ((bio = bio_list_pop(&bios)))
			retry_on_resume(bio);
}

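/*
 * Handle a discard bio: look up the mapping for the block it covers and
 * either queue a prepared discard (whole provisioned block), pass the
 * discard straight down for a partial block when allowed, or simply
 * complete it if the block was never provisioned.
 */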
static void process_discard(struct thin_c *tc, struct bio *bio)
{
	int r;
	unsigned long flags;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell, *cell2;
	struct dm_cell_key key, key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block.  This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (bio_detain(tc->pool, &key2, bio, &cell2)) {
			cell_defer_no_holder(tc, cell);
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block.  We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = pool->pf.discard_passdown;
			m->definitely_not_shared = !lookup_result.shared;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
				spin_lock_irqsave(&pool->lock, flags);
				list_add_tail(&m->list, &pool->prepared_discards);
				spin_unlock_irqrestore(&pool->lock, flags);
				wake_worker(pool);
			}
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell2);

			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary.  So we submit the discard of a
			 * partial block appropriately.
			 */
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

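/*
 * Break sharing of a data block: allocate a fresh block and schedule a
 * copy of the shared block into it, retrying or erroring the bio if no
 * space can be allocated.
 */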
static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result)
{
	struct dm_bio_prison_cell *cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
		break_sharing(tc, bio, block, &key, lookup_result, cell);
	else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_iter.bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);