/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/rbtree.h>

#define	DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocate block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * effects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */
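
/*
 * A concrete (hypothetical) example of the steps above: thin device A
 * and its snapshot B both map virtual block 7 to data block 100.  A
 * write to block 7 of A plugs further io to block 100 (i), quiesces
 * in-flight reads of it (ii), copies block 100 to a freshly allocated
 * block, say 205 (iii), inserts the mapping 7 -> 205 into A's btree
 * (iv), and finally unplugs the held io, which now remaps to block 205
 * (v).  B's btree still maps 7 -> 100 throughout, and A's committed
 * btree is untouched until the next commit.
 */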

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block_begin = b;
	key->block_end = b + 1ULL;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block_begin = b;
	key->block_end = b + 1ULL;
}
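
/*
 * Note on the two helpers above: data-block and virtual-block locks
 * share one bio prison, so key->virtual keeps the two keyspaces from
 * colliding when a virtual block number happens to equal a data block
 * number.
 */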

/*----------------------------------------------------------------*/

#define THROTTLE_THRESHOLD (1 * HZ)
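
/*
 * How this throttle is used (a summary of the code below): the worker
 * thread brackets each pass with throttle_work_start() and
 * throttle_work_complete(), calling throttle_work_update() as it goes.
 * Once a pass has run for more than THROTTLE_THRESHOLD jiffies the
 * worker takes the rwsem for write, stalling the submission paths that
 * take it for read via throttle_lock(), until the pass completes.
 */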

struct throttle {
	struct rw_semaphore lock;
	unsigned long threshold;
	bool throttle_applied;
};

static void throttle_init(struct throttle *t)
{
	init_rwsem(&t->lock);
	t->throttle_applied = false;
}

static void throttle_work_start(struct throttle *t)
{
	t->threshold = jiffies + THROTTLE_THRESHOLD;
}

static void throttle_work_update(struct throttle *t)
{
	if (!t->throttle_applied && jiffies > t->threshold) {
		down_write(&t->lock);
		t->throttle_applied = true;
	}
}

static void throttle_work_complete(struct throttle *t)
{
	if (t->throttle_applied) {
		t->throttle_applied = false;
		up_write(&t->lock);
	}
}

static void throttle_lock(struct throttle *t)
{
	down_read(&t->lock);
}

static void throttle_unlock(struct throttle *t)
{
	up_read(&t->lock);
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes, ordered here from least to most degraded
 * so that modes can be compared numerically.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};
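
/*
 * The ordering above is relied upon by comparisons such as commit()'s
 * "get_pool_mode(pool) >= PM_READ_ONLY" check, which treats read-only
 * and failed pools alike.
 */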

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

#define CELL_SORT_ARRAY_SIZE 8192

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */
	bool suspended:1;

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct throttle throttle;
	struct work_struct worker;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_cell_fn process_cell;
	process_cell_fn process_discard_cell;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;

	struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	sector_t origin_size;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	struct mapped_device *thin_md;

	bool requeue_mode:1;
	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_visit_release(struct pool *pool,
			       void (*fn)(void *, struct dm_bio_prison_cell *),
			       void *context,
			       struct dm_bio_prison_cell *cell)
{
	dm_cell_visit_release(pool->prison, fn, context, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error_with_code(struct pool *pool,
				 struct dm_bio_prison_cell *cell, int error_code)
{
	dm_cell_error(pool->prison, cell, error_code);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, -EIO);
}

static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, 0);
}

static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
}
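
/*
 * cell_success() and cell_requeue() deliberately reuse
 * cell_error_with_code(): an "error" of 0 completes the held bios
 * successfully, and DM_ENDIO_REQUEUE asks the dm core to requeue them,
 * so one helper covers all three outcomes.
 */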

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
};

static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
{
	bio_list_merge(bios, master);
	bio_list_init(master);
}

static void error_bio_list(struct bio_list *bios, int error)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios)))
		bio_endio(bio, error);
}

static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, master);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, error);
}

static void requeue_deferred_cells(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell, *tmp;

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&tc->lock, flags);
	list_splice_init(&tc->deferred_cells, &cells);
	spin_unlock_irqrestore(&tc->lock, flags);

	list_for_each_entry_safe(cell, tmp, &cells, user_list)
		cell_requeue(pool, cell);
}

static void requeue_io(struct thin_c *tc)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, &tc->deferred_bio_list);
	__merge_bio_list(&bios, &tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, DM_ENDIO_REQUEUE);
	requeue_deferred_cells(tc);
}

static void error_retry_list(struct pool *pool)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO);
	rcu_read_unlock();
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}
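
/*
 * (sectors_per_block_shift is negative when the block size is not a
 * power of two, so this test selects between the shift/mask fast path
 * and the sector_div() slow path in the helpers below.)
 */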

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}
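
/*
 * A worked example of the arithmetic above, assuming a 128-sector
 * (64KB) block size: a bio at sector 300 falls in virtual block
 * 300 >> 7 = 2, and if that block maps to data block 9, remap()
 * produces sector (9 << 7) | (300 & 127) = 1152 + 44 = 1196 on the
 * pool's data device.
 */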

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g., due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool pass_discard:1;
	bool definitely_not_shared:1;

	/*
	 * Track quiescing, copying and zeroing preparation actions.  When this
	 * counter hits zero the block is prepared and can be inserted into the
	 * btree.
	 */
	atomic_t prepare_actions;

	int err;
	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (atomic_dec_and_test(&m->prepare_actions)) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	__complete_mapping_preparation(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_thin_new_mapping *m = context;

	m->err = read_err || write_err ? -EIO : 0;
	complete_mapping_preparation(m);
}

static void overwrite_endio(struct bio *bio, int err)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;

	m->err = err;
	complete_mapping_preparation(m);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell, except the original holder, back
 * to the deferred_bios list.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void thin_defer_bio(struct thin_c *tc, struct bio *bio);

struct remap_info {
	struct thin_c *tc;
	struct bio_list defer_bios;
	struct bio_list issue_bios;
};

static void __inc_remap_and_issue_cell(void *context,
				       struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
			bio_list_add(&info->defer_bios, bio);
		else {
			inc_all_io_entry(info->tc->pool, bio);

			/*
			 * We can't issue the bios with the bio prison lock
			 * held, so we add them to a list to issue on
			 * return from this function.
			 */
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void inc_remap_and_issue_cell(struct thin_c *tc,
				     struct dm_bio_prison_cell *cell,
				     dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	/*
	 * We have to be careful to inc any bios we're about to issue
	 * before the cell is released, and avoid a race with new bios
	 * being added to the cell.
	 */
	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(info.tc, bio, block);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio) {
		m->bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&m->bio->bi_remaining);
	}
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio) {
		bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&bio->bi_remaining);
	}

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
		bio_endio(bio, 0);
	} else {
		inc_all_io_entry(tc->pool, m->cell->holder);
		remap_and_issue(tc, m->cell->holder, m->data_block);
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
	}

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		if (m->definitely_not_shared)
			remap_and_issue(tc, m->bio, m->data_block);
		else {
			bool used = false;
			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
				bio_endio(m->bio, 0);
			else
				remap_and_issue(tc, m->bio, m->data_block);
		}
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}
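
/*
 * Example: with 128-sector blocks, a WRITE bio of exactly 128 << 9 =
 * 65536 bytes "overwrites" its block (bios are split so they never
 * span block boundaries), which lets schedule_copy() and
 * schedule_zero() skip the kcopyd step and issue the bio immediately
 * via remap_and_issue_overwrite().
 */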

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}
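
/*
 * ensure_next_mapping() preloads pool->next_mapping with a GFP_ATOMIC
 * allocation so that a subsequent get_next_mapping() cannot fail
 * (hence its BUG_ON).  On -ENOMEM callers are expected to defer the
 * work and retry later.
 */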

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
		    sector_t begin, sector_t end)
{
	int r;
	struct dm_io_region to;

	to.bdev = tc->pool_dev->bdev;
	to.sector = begin;
	to.count = end - begin;

	r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
	if (r < 0) {
		DMERR_LIMIT("dm_kcopyd_zero() failed");
		copy_complete(1, 1, m);
	}
}

static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
				      dm_block_t data_block,
				      struct dm_thin_new_mapping *m)
{
	struct pool *pool = tc->pool;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->overwrite_mapping = m;
	m->bio = bio;
	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
	inc_all_io_entry(pool, bio);
	remap_and_issue(tc, bio, data_block);
}

/*
 * A partial copy also needs to zero the uncopied region.
 */
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio,
			  sector_t len)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;

	/*
	 * quiesce action + copy action + an extra reference held for the
	 * duration of this function (we may need to inc later for a
	 * partial zero).
	 */
	atomic_set(&m->prepare_actions, 3);

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		complete_mapping_preparation(m); /* already quiesced */

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_dest, m);
	else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = len;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = len;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			copy_complete(1, 1, m);

			/*
			 * We allow the zero to be issued, to simplify the
			 * error path.  Otherwise we'd need to start
			 * worrying about decrementing the prepare_actions
			 * counter.
			 */
		}

		/*
		 * Do we need to zero a tail region?
		 */
		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
			atomic_inc(&m->prepare_actions);
			ll_zero(tc, m,
				data_dest * pool->sectors_per_block + len,
				(data_dest + 1) * pool->sectors_per_block);
		}
	}

	complete_mapping_preparation(m); /* drop our ref */
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio,
		      tc->pool->sectors_per_block);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_block, m);

	else
		ll_zero(tc, m,
			data_block * pool->sectors_per_block,
			(data_block + 1) * pool->sectors_per_block);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

	if (virt_block_end <= tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      pool->sectors_per_block);

	else if (virt_block_begin < tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      tc->origin_size - virt_block_begin);

	else
		schedule_zero(tc, virt_block, data_dest, cell, bio);
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

static void check_for_space(struct pool *pool)
{
	int r;
	dm_block_t nr_free;

	if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
		return;

	r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
	if (r)
		return;

	if (nr_free)
		set_pool_mode(pool, PM_WRITE);
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
	else
		check_for_space(pool);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

static int should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return -EIO;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space ? -ENOSPC : 0;

	case PM_READ_ONLY:
	case PM_FAIL:
		return -EIO;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return -EIO;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	int error = should_error_unserviceable_bio(pool);

	if (error)
		bio_endio(bio, error);
	else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;
	int error;

	error = should_error_unserviceable_bio(pool);
	if (error) {
		cell_error_with_code(pool, cell, error);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);
