/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rbtree.h>

#define	DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
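/* With SECTOR_SHIFT == 9, these evaluate to 128 and 2097152 sectors. */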

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, i.e. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block, as it would after
 * the commit.
 *
 * The downside of this scheme is that the timestamp magic isn't perfect,
 * and will continue to think that a data block in the snapshot device is
 * shared even after the write to the origin has broken sharing.  I suspect
 * data blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}
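
/*
 * Illustrative sketch (not part of the original source): these keys feed
 * the bio prison.  A caller builds a key for the block a bio touches and
 * tries to detain the bio; a non-zero return from bio_detain() (defined
 * further down) means another bio already holds the cell, so this one
 * must wait:
 *
 *	struct dm_cell_key key;
 *	struct dm_bio_prison_cell *cell;
 *
 *	build_virtual_key(tc->td, block, &key);
 *	if (bio_detain(tc->pool, &key, bio, &cell))
 *		return;	// the cell's holder will release us later
 */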

/*----------------------------------------------------------------*/

#define THROTTLE_THRESHOLD (1 * HZ)

struct throttle {
	struct rw_semaphore lock;
	unsigned long threshold;
	bool throttle_applied;
};

static void throttle_init(struct throttle *t)
{
	init_rwsem(&t->lock);
	t->throttle_applied = false;
}

static void throttle_work_start(struct throttle *t)
{
	t->threshold = jiffies + THROTTLE_THRESHOLD;
}

static void throttle_work_update(struct throttle *t)
{
	if (!t->throttle_applied && jiffies > t->threshold) {
		down_write(&t->lock);
		t->throttle_applied = true;
	}
}

static void throttle_work_complete(struct throttle *t)
{
	if (t->throttle_applied) {
		t->throttle_applied = false;
		up_write(&t->lock);
	}
}

static void throttle_lock(struct throttle *t)
{
	down_read(&t->lock);
}

static void throttle_unlock(struct throttle *t)
{
	up_read(&t->lock);
}
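
/*
 * Illustrative sketch (not part of the original source): the worker
 * brackets each pass with throttle_work_start/_update/_complete, while
 * submission paths take the read side with throttle_lock/_unlock.  Once
 * a pass has run for more than THROTTLE_THRESHOLD, the worker takes the
 * write side, stalling new submitters until the pass completes:
 *
 *	throttle_work_start(&pool->throttle);
 *	while (have_work(pool)) {		// have_work() is hypothetical
 *		throttle_work_update(&pool->throttle);
 *		do_one_piece_of_work(pool);	// hypothetical helper
 *	}
 *	throttle_work_complete(&pool->throttle);
 */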

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes.  Ordered from least to most degraded so
 * that modes can be compared.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};
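
/*
 * Illustrative note (not part of the original source): the ordering
 * above is what makes pool mode checks simple comparisons, e.g.
 *
 *	if (get_pool_mode(pool) >= PM_READ_ONLY)
 *		return -EINVAL;	// metadata must not be changed
 *
 * as commit() does further down.
 */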

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct throttle throttle;
	struct work_struct worker;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_cell_fn process_cell;
	process_cell_fn process_discard_cell;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	sector_t origin_size;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	bool requeue_mode:1;
	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_visit_release(struct pool *pool,
			       void (*fn)(void *, struct dm_bio_prison_cell *),
			       void *context,
			       struct dm_bio_prison_cell *cell)
{
	dm_cell_visit_release(pool->prison, fn, context, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error_with_code(struct pool *pool,
				 struct dm_bio_prison_cell *cell, int error_code)
{
	dm_cell_error(pool->prison, cell, error_code);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, -EIO);
}

static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, 0);
}

static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
};

static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, master);
	bio_list_init(master);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

static void requeue_deferred_cells(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell, *tmp;

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&tc->lock, flags);
	list_splice_init(&tc->deferred_cells, &cells);
	spin_unlock_irqrestore(&tc->lock, flags);

	list_for_each_entry_safe(cell, tmp, &cells, user_list)
		cell_requeue(pool, cell);
}

static void requeue_io(struct thin_c *tc)
{
	requeue_bio_list(tc, &tc->deferred_bio_list);
	requeue_bio_list(tc, &tc->retry_on_resume_list);
	requeue_deferred_cells(tc);
}

static void error_thin_retry_list(struct thin_c *tc)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, &tc->retry_on_resume_list);
	bio_list_init(&tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}

static void error_retry_list(struct pool *pool)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_retry_list(tc);
	rcu_read_unlock();
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}
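
/*
 * Worked example (illustrative, not part of the original source): with a
 * 64KiB block size, sectors_per_block = 128 and sectors_per_block_shift
 * = 7.  A bio at virtual sector 1000 is in virtual block 1000 >> 7 = 7,
 * at offset 1000 & 127 = 104.  If block 7 maps to data block 19, remap()
 * rewrites the sector to (19 << 7) | 104 = 2536 on the data device.
 */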

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool pass_discard:1;
	bool definitely_not_shared:1;

	/*
	 * Track quiescing, copying and zeroing preparation actions.  When this
	 * counter hits zero the block is prepared and can be inserted into the
	 * btree.
	 */
	atomic_t prepare_actions;

	int err;
	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (atomic_dec_and_test(&m->prepare_actions)) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	__complete_mapping_preparation(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_thin_new_mapping *m = context;

	m->err = read_err || write_err ? -EIO : 0;
	complete_mapping_preparation(m);
}

static void overwrite_endio(struct bio *bio, int err)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;

	m->err = err;
	complete_mapping_preparation(m);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell, except the original holder, back
 * to the deferred_bios list.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void thin_defer_bio(struct thin_c *tc, struct bio *bio);

struct remap_info {
	struct thin_c *tc;
	struct bio_list defer_bios;
	struct bio_list issue_bios;
};

static void __inc_remap_and_issue_cell(void *context,
				       struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
			bio_list_add(&info->defer_bios, bio);
		else {
			inc_all_io_entry(info->tc->pool, bio);

			/*
			 * We can't issue the bios with the bio prison lock
			 * held, so we add them to a list to issue on
			 * return from this function.
			 */
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void inc_remap_and_issue_cell(struct thin_c *tc,
				     struct dm_bio_prison_cell *cell,
				     dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	/*
	 * We have to be careful to inc any bios we're about to issue
	 * before the cell is released, and avoid a race with new bios
	 * being added to the cell.
	 */
	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(info.tc, bio, block);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio) {
		m->bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&m->bio->bi_remaining);
	}
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio) {
		bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&bio->bi_remaining);
	}

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
		bio_endio(bio, 0);
	} else {
		inc_all_io_entry(tc->pool, m->cell->holder);
		remap_and_issue(tc, m->cell->holder, m->data_block);
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
	}

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		if (m->definitely_not_shared)
			remap_and_issue(tc, m->bio, m->data_block);
		else {
			bool used = false;
			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
				bio_endio(m->bio, 0);
			else
				remap_and_issue(tc, m->bio, m->data_block);
		}
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}
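
/*
 * Illustrative sketch (not part of the original source): the worker
 * drains each prepared list through the mode-dependent handler installed
 * in struct pool, roughly:
 *
 *	process_prepared(pool, &pool->prepared_mappings,
 *			 &pool->process_prepared_mapping);
 *	process_prepared(pool, &pool->prepared_discards,
 *			 &pool->process_prepared_discard);
 */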

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}
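
/*
 * Illustrative sketch (not part of the original source): callers reserve
 * a mapping with ensure_next_mapping() before changing any state, so the
 * only failure (-ENOMEM) happens early and the bio can simply be deferred
 * and retried; get_next_mapping() then consumes the reservation and
 * cannot fail:
 *
 *	if (ensure_next_mapping(pool)) {
 *		// hypothetical caller: defer the bio and retry later
 *		thin_defer_bio(tc, bio);
 *		return;
 *	}
 *	m = get_next_mapping(pool);
 */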

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
		    sector_t begin, sector_t end)
{
	int r;
	struct dm_io_region to;

	to.bdev = tc->pool_dev->bdev;
	to.sector = begin;
	to.count = end - begin;

	r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
	if (r < 0) {
		DMERR_LIMIT("dm_kcopyd_zero() failed");
		copy_complete(1, 1, m);
	}
}

static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
				      dm_block_t data_block,
				      struct dm_thin_new_mapping *m)
{
	struct pool *pool = tc->pool;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->overwrite_mapping = m;
	m->bio = bio;
	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
	inc_all_io_entry(pool, bio);
	remap_and_issue(tc, bio, data_block);
}

/*
 * A partial copy also needs to zero the uncopied region.
 */
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio,
			  sector_t len)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;

	/*
	 * quiesce action + copy action + an extra reference held for the
	 * duration of this function (we may need to inc later for a
	 * partial zero).
	 */
	atomic_set(&m->prepare_actions, 3);

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		complete_mapping_preparation(m); /* already quiesced */

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_dest, m);
	else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = len;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = len;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			copy_complete(1, 1, m);

			/*
			 * We allow the zero to be issued, to simplify the
			 * error path.  Otherwise we'd need to start
			 * worrying about decrementing the prepare_actions
			 * counter.
			 */
		}

		/*
		 * Do we need to zero a tail region?
		 */
		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
			atomic_inc(&m->prepare_actions);
			ll_zero(tc, m,
				data_dest * pool->sectors_per_block + len,
				(data_dest + 1) * pool->sectors_per_block);
		}
	}

	complete_mapping_preparation(m); /* drop our ref */
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio,
		      tc->pool->sectors_per_block);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_block, m);

	else
		ll_zero(tc, m,
			data_block * pool->sectors_per_block,
			(data_block + 1) * pool->sectors_per_block);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

	if (virt_block_end <= tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      pool->sectors_per_block);

	else if (virt_block_begin < tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      tc->origin_size - virt_block_begin);

	else
		schedule_zero(tc, virt_block, data_dest, cell, bio);
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

static int should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return -EIO;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space ? -ENOSPC : 0;

	case PM_READ_ONLY:
	case PM_FAIL:
		return -EIO;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return -EIO;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	int error = should_error_unserviceable_bio(pool);

	if (error)
		bio_endio(bio, error);
	else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;
	int error;

	error = should_error_unserviceable_bio(pool);
	if (error) {
		cell_error_with_code(pool, cell, error);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	error = should_error_unserviceable_bio(pool);
	if (error)
		while ((bio = bio_list_pop(&bios)))
			bio_endio(bio, error);
	else
		while ((bio = bio_list_pop(&bios)))
			retry_on_resume(bio);
}

static void process_discard_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	int r;
	struct bio *bio = cell->holder;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell2;
	struct dm_cell_key key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	if (tc->requeue_mode) {
		cell_requeue(pool, cell);
		return;
	}

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (