/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#define	DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, i.e. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 3 modes, ordered by increasing degradation for comparisons.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;

	struct bio_list retry_on_resume_list;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void out_of_data_space(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_defer_no_holder_no_free(struct thin_c *tc,
					 struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

static void cell_error(struct pool *pool,
		       struct dm_bio_prison_cell *cell)
{
	dm_cell_error(pool->prison, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
};

static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, master);
	bio_list_init(master);

	while ((bio = bio_list_pop(&bios))) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		if (h->tc == tc)
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_list_add(master, bio);
	}
}

static void requeue_io(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	__requeue_bio_list(tc, &pool->deferred_bios);
	__requeue_bio_list(tc, &pool->retry_on_resume_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

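/*
 * Map a bio's starting sector to the index of the pool data block that
 * contains it.
 */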
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

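/*
 * Redirect a bio to the given data block on the pool's data device,
 * preserving its offset within the block.
 */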
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

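/*
 * Register this bio with the pool's all_io deferred set (discards are
 * excluded), so work queued behind the set waits for it to complete.
 */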
static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

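/*
 * Issue a bio to the underlying data device, or defer it for the worker
 * if it carries FLUSH/FUA and there are uncommitted metadata changes
 * that must reach the metadata device first.
 */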
static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool quiesced:1;
	bool prepared:1;
	bool pass_discard:1;
	bool definitely_not_shared:1;

	int err;
	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

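/*
 * A mapping is handed to the worker only once it is both quiesced and
 * prepared (see copy_complete and overwrite_endio below).
 */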
static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (m->quiesced && m->prepared) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

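/* kcopyd callback: mark the mapping prepared and maybe hand it to the worker. */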
static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_thin_new_mapping *m = context;
	struct pool *pool = m->tc->pool;

	m->err = read_err || write_err ? -EIO : 0;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

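/* End-io for a bio that overwrote a whole block instead of being copied. */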
static void overwrite_endio(struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;
	struct pool *pool = m->tc->pool;

	m->err = err;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = true;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	cell_release(pool, cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&tc->pool->lock, flags);

	wake_worker(pool);
}

/*
 * Same as cell_defer above, except it omits the original holder of the cell.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	cell_release_no_holder(pool, cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio) {
		m->bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&m->bio->bi_remaining);
	}
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio) {
		bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&bio->bi_remaining);
	}

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		cell_defer_no_holder(tc, m->cell);
		bio_endio(bio, 0);
	} else
		cell_defer(tc, m->cell);

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

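/* Error path for a prepared discard: fail the bio and release both cells. */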
static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		if (m->definitely_not_shared)
			remap_and_issue(tc, m->bio, m->data_block);
		else {
			bool used = false;
			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
				bio_endio(m->bio, 0);
			else
				remap_and_issue(tc, m->bio, m->data_block);
		}
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

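/*
 * Remove the mapping from the btree, then hand off to the passdown path
 * which decides whether the discard also goes to the data device.
 */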
static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		m->quiesced = true;

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_dest);
	} else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = pool->sectors_per_block;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			cell_error(pool, cell);
		}
	}
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->origin_dev,
		      virt_block, data_dest, cell, bio);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->quiesced = true;
	m->prepared = false;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_block);
	} else {
		int r;
		struct dm_io_region to;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_block * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_zero() failed");
			cell_error(pool, cell);
		}
	}
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) != PM_WRITE)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

	return r;
}

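/*
 * Log a warning and raise a dm event the first time the free data block
 * count drops to or below the pool's low water mark.
 */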
static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

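/*
 * Allocate a new data block, committing outstanding metadata first if the
 * pool appears to be out of space.  Returns -ENOSPC once the pool is
 * genuinely full.
 */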
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (get_pool_mode(pool) != PM_WRITE)
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			out_of_data_space(pool);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

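/* Either error the bio or queue it for retry, depending on pool features. */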
static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	/*
	 * When pool is read-only, no cell locking is needed because
	 * nothing is changing.
	 */
	WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY);

	if (pool->pf.error_if_no_space)
		bio_io_error(bio);
	else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	while ((bio = bio_list_pop(&bios)))
		handle_unserviceable_bio(pool, bio);
}

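/*
 * Handle a discard bio: unmap the block and, depending on pool features,
 * pass the discard down to the underlying data device.
 */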
static void process_discard(struct thin_c *tc, struct bio *bio)
{
	int r;
	unsigned long flags;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell, *cell2;
	struct dm_cell_key key, key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block.  This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (bio_detain(tc->pool, &key2, bio, &cell2)) {
			cell_defer_no_holder(tc, cell);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block.  We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = pool->pf.discard_passdown;
			m->definitely_not_shared = !lookup_result.shared;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
				spin_lock_irqsave(&pool->lock, flags);
				list_add_tail(&m->list, &pool->prepared_discards);
				spin_unlock_irqrestore(&pool->lock, flags);
				wake_worker(pool);
			}
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell2);

			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary.  So we submit the discard of a
			 * partial block appropriately.
			 */
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

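/*
 * Allocate a new data block and schedule a copy of the shared block into
 * it, so this thin device gets its own block to write to.
 */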
static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result)
{
	struct dm_bio_prison_cell *cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
		break_sharing(tc, bio, block, &key, lookup_result, cell);
	else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, lookup_result->block);
	}
}

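/* Map a previously unprovisioned virtual block, allocating a new data block. */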
static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_iter.bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

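/*
 * Main per-bio processing for a thin device while the pool is writeable.
 */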
static void process_bio(struct thin_c *tc, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;
	struct dm_thin_lookup_result lookup_result;

	/*
	 * If cell is already occupied, then the block is already
	 * being provisioned so we have nothing further to do here.
	 */
	build_virtual_key(tc->td, block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared) {
			process_shared_bio(tc, bio, block, &lookup_result);
			cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);

			remap_and_issue(tc, bio, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (bio_data_dir(bio) == READ && tc->origin_dev) {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);

			remap_to_origin_and_issue(tc, bio);
		} else
			provision_block(tc, bio, block, cell);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

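/*
 * Variant of process_bio used while the pool is in read-only mode.
 */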
static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
			handle_unserviceable_bio(tc->pool, bio);
		else {
			inc_all_io_entry(tc->pool, bio);