/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#define	DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define DEFERRED_SET_SIZE 64
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is that the timestamp magic isn't perfect, and
 * will continue to think that data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Sometimes we can't deal with a bio straight away.  We put them in prison
 * where they can't cause any mischief.  Bios are put in a cell identified
 * by a key; multiple bios can be in the same cell.  When the cell is
 * subsequently unlocked the bios become available.
 */
struct dm_bio_prison;

struct dm_cell_key {
	int virtual;
	dm_thin_id dev;
	dm_block_t block;
};

struct dm_bio_prison_cell {
	struct hlist_node list;
	struct dm_bio_prison *prison;
	struct dm_cell_key key;
	struct bio *holder;
	struct bio_list bios;
};

struct dm_bio_prison {
	spinlock_t lock;
	mempool_t *cell_pool;

	unsigned nr_buckets;
	unsigned hash_mask;
	struct hlist_head *cells;
};

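/*
 * Choose a power-of-two number of hash buckets: roughly one bucket per
 * four concurrently held cells, clamped to the range [128, 8192].
 */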
static uint32_t calc_nr_buckets(unsigned nr_cells)
{
	uint32_t n = 128;

	nr_cells /= 4;
	nr_cells = min(nr_cells, 8192u);

	while (n < nr_cells)
		n <<= 1;

	return n;
}

static struct kmem_cache *_cell_cache;

/*
 * @nr_cells should be the number of cells you want in use _concurrently_.
 * Don't confuse it with the number of distinct keys.
 */
static struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
{
	unsigned i;
	uint32_t nr_buckets = calc_nr_buckets(nr_cells);
	size_t len = sizeof(struct dm_bio_prison) +
		(sizeof(struct hlist_head) * nr_buckets);
	struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);

	if (!prison)
		return NULL;

	spin_lock_init(&prison->lock);
	prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
	if (!prison->cell_pool) {
		kfree(prison);
		return NULL;
	}

	prison->nr_buckets = nr_buckets;
	prison->hash_mask = nr_buckets - 1;
	prison->cells = (struct hlist_head *) (prison + 1);
	for (i = 0; i < nr_buckets; i++)
		INIT_HLIST_HEAD(prison->cells + i);

	return prison;
}

static void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
	mempool_destroy(prison->cell_pool);
	kfree(prison);
}

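/*
 * Cells are hashed on the block number alone, using a multiplicative hash
 * with a large prime, then masked into the power-of-two bucket range.
 */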
static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
{
	const unsigned long BIG_PRIME = 4294967291UL;
	uint64_t hash = key->block * BIG_PRIME;

	return (uint32_t) (hash & prison->hash_mask);
}

static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
{
	return (lhs->virtual == rhs->virtual) &&
	       (lhs->dev == rhs->dev) &&
	       (lhs->block == rhs->block);
}

static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
						  struct dm_cell_key *key)
{
	struct dm_bio_prison_cell *cell;
	struct hlist_node *tmp;

	hlist_for_each_entry(cell, tmp, bucket, list)
		if (keys_equal(&cell->key, key))
			return cell;

	return NULL;
}

/*
 * This may block if a new cell needs allocating.  You must ensure that
 * cells will be unlocked even if the calling thread is blocked.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 */
static int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
			 struct bio *inmate, struct dm_bio_prison_cell **ref)
{
	int r = 1;
	unsigned long flags;
	uint32_t hash = hash_key(prison, key);
	struct dm_bio_prison_cell *cell, *cell2;

	BUG_ON(hash > prison->nr_buckets);

	spin_lock_irqsave(&prison->lock, flags);

	cell = __search_bucket(prison->cells + hash, key);
	if (cell) {
		bio_list_add(&cell->bios, inmate);
		goto out;
	}

	/*
	 * Allocate a new cell
	 */
	spin_unlock_irqrestore(&prison->lock, flags);
	cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
	spin_lock_irqsave(&prison->lock, flags);

	/*
	 * We've been unlocked, so we have to double check that
	 * nobody else has inserted this cell in the meantime.
	 */
	cell = __search_bucket(prison->cells + hash, key);
	if (cell) {
		mempool_free(cell2, prison->cell_pool);
		bio_list_add(&cell->bios, inmate);
		goto out;
	}

	/*
	 * Use new cell.
	 */
	cell = cell2;

	cell->prison = prison;
	memcpy(&cell->key, key, sizeof(cell->key));
	cell->holder = inmate;
	bio_list_init(&cell->bios);
	hlist_add_head(&cell->list, prison->cells + hash);

	r = 0;

out:
	spin_unlock_irqrestore(&prison->lock, flags);

	*ref = cell;

	return r;
}

/*
 * @inmates must have been initialised prior to this call
 */
static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
{
	struct dm_bio_prison *prison = cell->prison;

	hlist_del(&cell->list);

	if (inmates) {
		bio_list_add(inmates, cell->holder);
		bio_list_merge(inmates, &cell->bios);
	}

	mempool_free(cell, prison->cell_pool);
}

static void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
{
	unsigned long flags;
	struct dm_bio_prison *prison = cell->prison;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release(cell, bios);
	spin_unlock_irqrestore(&prison->lock, flags);
}

/*
 * There are a couple of places where we put a bio into a cell briefly
 * before taking it out again.  In these situations we know that no other
 * bio may be in the cell.  This function releases the cell, and also does
 * a sanity check.
 */
static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
{
	BUG_ON(cell->holder != bio);
	BUG_ON(!bio_list_empty(&cell->bios));

	__cell_release(cell, NULL);
}

static void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
{
	unsigned long flags;
	struct dm_bio_prison *prison = cell->prison;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release_singleton(cell, bio);
	spin_unlock_irqrestore(&prison->lock, flags);
}

/*
 * Sometimes we don't want the holder, just the additional bios.
 */
static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
				     struct bio_list *inmates)
{
	struct dm_bio_prison *prison = cell->prison;

	hlist_del(&cell->list);
	bio_list_merge(inmates, &cell->bios);

	mempool_free(cell, prison->cell_pool);
}

static void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell,
				      struct bio_list *inmates)
{
	unsigned long flags;
	struct dm_bio_prison *prison = cell->prison;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release_no_holder(cell, inmates);
	spin_unlock_irqrestore(&prison->lock, flags);
}

static void dm_cell_error(struct dm_bio_prison_cell *cell)
{
	struct dm_bio_prison *prison = cell->prison;
	struct bio_list bios;
	struct bio *bio;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release(cell, &bios);
	spin_unlock_irqrestore(&prison->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}

/*----------------------------------------------------------------*/

/*
 * We use the deferred set to keep track of pending reads to shared blocks.
 * We do this to ensure the new mapping caused by a write isn't performed
 * until these prior reads have completed.  Otherwise the insertion of the
 * new mapping could free the old block that the read bios are mapped to.
 */

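/*
 * The entries array behaves as a small circular buffer.  In-flight IOs take
 * a reference on the current entry (dm_deferred_entry_inc).  Work queued
 * with dm_deferred_set_add_work is attached to the current entry and is only
 * dispatched once that entry and all earlier entries have drained (__sweep).
 */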
struct dm_deferred_set;
struct dm_deferred_entry {
	struct dm_deferred_set *ds;
	unsigned count;
	struct list_head work_items;
};

struct dm_deferred_set {
	spinlock_t lock;
	unsigned current_entry;
	unsigned sweeper;
	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};

static struct dm_deferred_set *dm_deferred_set_create(void)
{
	int i;
	struct dm_deferred_set *ds;

	ds = kmalloc(sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	spin_lock_init(&ds->lock);
	ds->current_entry = 0;
	ds->sweeper = 0;
	for (i = 0; i < DEFERRED_SET_SIZE; i++) {
		ds->entries[i].ds = ds;
		ds->entries[i].count = 0;
		INIT_LIST_HEAD(&ds->entries[i].work_items);
	}

	return ds;
}

static void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
	kfree(ds);
}

static struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
	unsigned long flags;
	struct dm_deferred_entry *entry;

	spin_lock_irqsave(&ds->lock, flags);
	entry = ds->entries + ds->current_entry;
	entry->count++;
	spin_unlock_irqrestore(&ds->lock, flags);

	return entry;
}

static unsigned ds_next(unsigned index)
{
	return (index + 1) % DEFERRED_SET_SIZE;
}

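/*
 * Advance the sweeper over drained entries, splicing their queued work onto
 * @head.  The current entry's work is included once it too has drained.
 */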
static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
	while ((ds->sweeper != ds->current_entry) &&
	       !ds->entries[ds->sweeper].count) {
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
		ds->sweeper = ds_next(ds->sweeper);
	}

	if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

static void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&entry->ds->lock, flags);
	BUG_ON(!entry->count);
	--entry->count;
	__sweep(entry->ds, head);
	spin_unlock_irqrestore(&entry->ds->lock, flags);
}

/*
 * Returns 1 if the work was deferred, or 0 if there were no pending items to delay the job.
 */
static int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
	int r = 1;
	unsigned long flags;
	unsigned next_entry;

	spin_lock_irqsave(&ds->lock, flags);
	if ((ds->sweeper == ds->current_entry) &&
	    !ds->entries[ds->current_entry].count)
		r = 0;
	else {
		list_add(work, &ds->entries[ds->current_entry].work_items);
		next_entry = ds_next(ds->current_entry);
		if (!ds->entries[next_entry].count)
			ds->current_entry = next_entry;
	}
	spin_unlock_irqrestore(&ds->lock, flags);

	return r;
}

static int __init dm_bio_prison_init(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}

static void __exit dm_bio_prison_exit(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 3 modes.  They are listed in order of increasing degradation so the modes can be compared.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	unsigned low_water_triggered:1;	/* A dm event has been sent */
	unsigned no_free_space:1;	/* A -ENOSPC warning has been issued */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;

	struct bio_list retry_on_resume_list;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;
	mempool_t *endio_hook_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void set_pool_mode(struct pool *pool, enum pool_mode mode);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
};

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
};

static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, master);
	bio_list_init(master);

	while ((bio = bio_list_pop(&bios))) {
		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;

		if (h->tc == tc)
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_list_add(master, bio);
	}
}

static void requeue_io(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	__requeue_bio_list(tc, &pool->deferred_bios);
	__requeue_bio_list(tc, &pool->retry_on_resume_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

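/*
 * When the pool's block size is a power of two, sectors_per_block_shift is
 * non-negative and the block arithmetic below can use shifts and masks;
 * otherwise we fall back to 64-bit division via sector_div().
 */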
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	sector_t block_nr = bio->bi_sector;

	if (tc->pool->sectors_per_block_shift < 0)
		(void) sector_div(block_nr, tc->pool->sectors_per_block);
	else
		block_nr >>= tc->pool->sectors_per_block_shift;

	return block_nr;
}

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (tc->pool->sectors_per_block_shift < 0)
		bio->bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
	else
		bio->bi_sector = (block << pool->sectors_per_block_shift) |
				(bi_sector & (pool->sectors_per_block - 1));
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed e.g, due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	unsigned quiesced:1;
	unsigned prepared:1;
	unsigned pass_discard:1;

	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;
	int err;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

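/*
 * Called with pool->lock held.  A mapping must be both quiesced (no reads
 * in flight to the old data block) and prepared (copy or zero finished)
 * before it is handed to the worker for insertion into the btree.
 */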
static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (m->quiesced && m->prepared) {
		list_add(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_thin_new_mapping *m = context;
	struct pool *pool = m->tc->pool;

	m->err = read_err || write_err ? -EIO : 0;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = 1;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void overwrite_endio(struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
	struct dm_thin_new_mapping *m = h->overwrite_mapping;
	struct pool *pool = m->tc->pool;

	m->err = err;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = 1;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
		       dm_block_t data_block)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	dm_cell_release(cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&tc->pool->lock, flags);

	wake_worker(pool);
}

/*
 * Same as cell_defer above, except it omits one particular detainee,
 * a write bio that covers the block and has already been processed.
 */
static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct bio_list bios;
	struct pool *pool = tc->pool;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&pool->lock, flags);
	dm_cell_release_no_holder(cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio)
		m->bio->bi_end_io = m->saved_bi_end_io;
	dm_cell_error(m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio)
		bio->bi_end_io = m->saved_bi_end_io;

	if (m->err) {
		dm_cell_error(m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		DMERR("dm_thin_insert_block() failed");
		dm_cell_error(m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		cell_defer_except(tc, m->cell);
		bio_endio(bio, 0);
	} else
		cell_defer(tc, m->cell, m->data_block);

out:
	list_del(&m->list);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_except(tc, m->cell);
	cell_defer_except(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	if (m->pass_discard)
		remap_and_issue(tc, m->bio, m->data_block);
	else
		bio_endio(m->bio, 0);

	cell_defer_except(tc, m->cell);
	cell_defer_except(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

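/*
 * Take a private copy of the list under the pool lock, then run the given
 * process_mapping_fn on each mapping outside the lock.
 */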
static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *r = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	pool->next_mapping = NULL;

	return r;
}

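/*
 * Set up a new mapping of virt_block onto data_dest.  If the bio covers the
 * whole block it is simply redirected there; otherwise the old data is
 * copied across with kcopyd before the mapping is inserted.
 */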
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	INIT_LIST_HEAD(&m->list);
	m->quiesced = 0;
	m->prepared = 0;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;
	m->err = 0;
	m->bio = NULL;

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		m->quiesced = 1;

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		remap_and_issue(tc, bio, data_dest);
	} else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = pool->sectors_per_block;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR("dm_kcopyd_copy() failed");
			dm_cell_error(cell);
		}
	}
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->origin_dev,
		      virt_block, data_dest, cell, bio);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	INIT_LIST_HEAD(&m->list);
	m->quiesced = 1;
	m->prepared = 0;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;
	m->err = 0;
	m->bio = NULL;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		remap_and_issue(tc, bio, data_block);
	} else {
		int r;
		struct dm_io_region to;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_block * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR("dm_kcopyd_zero() failed");
			dm_cell_error(cell);
		}
	}
}

static int commit(struct pool *pool)
{
	int r;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		DMERR("commit failed, error = %d", r);

	return r;
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit_or_fallback(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) != PM_WRITE)
		return -EINVAL;

	r = commit(pool);
	if (r)
		set_pool_mode(pool, PM_READ_ONLY);

	return r;
}

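/*
 * Allocate a free data block.  A dm event is raised the first time the free
 * block count drops to the low-water mark, and the allocation is retried
 * after a metadata commit before giving up with -ENOSPC.
 */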
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	unsigned long flags;
	struct pool *pool = tc->pool;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r)
		return r;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark, sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = 1;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}

	if (!free_blocks) {
		if (pool->no_free_space)
			return -ENOSPC;
		else {
			/*
			 * Try to commit to see if that will free up some
			 * more space.
			 */
			(void) commit_or_fallback(pool);

			r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
			if (r)
				return r;

			/*
			 * If we still have no space we set a flag to avoid
			 * doing all this checking and return -ENOSPC.
			 */
			if (!free_blocks) {
				DMWARN("%s: no free space available.",
				       dm_device_name(pool->pool_md));
				spin_lock_irqsave(&pool->lock, flags);
				pool->no_free_space = 1;
				spin_unlock_irqrestore(&pool->lock, flags);
				return -ENOSPC;
			}
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r)
		return r;

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
	struct thin_c *tc = h->tc;
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void no_space(struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	dm_cell_release(cell, &bios);

	while ((bio = bio_list_pop(&bios)))
		retry_on_resume(bio);
}

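/*
 * A discard is quiesced against both the virtual block and the data block
 * it maps to.  A discard covering a whole block is queued as a prepared
 * discard so the mapping can be removed (and optionally passed down);
 * a partial-block discard is passed down or completed immediately.
 */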
static void process_discard(struct thin_c *tc, struct bio *bio)
{
	int r;
	unsigned long flags;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell, *cell2;
	struct dm_cell_key key, key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	build_virtual_key(tc->td, block, &key);
	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block.  This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
			dm_cell_release_singleton(cell, bio);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block.  We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->err = 0;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
				spin_lock_irqsave(&pool->lock, flags);
				list_add(&m->list, &pool->prepared_discards);
				spin_unlock_irqrestore(&pool->lock, flags);
				wake_worker(pool);
			}
		} else {
			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary.  So we submit the discard of a
			 * partial block appropriately.
			 */
			dm_cell_release_singleton(cell, bio);
			dm_cell_release_singleton(cell2, bio);
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		dm_cell_release_singleton(cell, bio);
		bio_endio(bio, 0);
		break;

	default:
		DMERR("discard: find block unexpectedly returned %d", r);
		dm_cell_release_singleton(cell, bio);
		bio_io_error(bio);
		break;
	}
}

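/*
 * Break sharing by allocating a fresh data block and scheduling a copy into
 * it.  If the pool has run out of space the bio is queued until the pool is
 * resumed; any other allocation failure errors the bios held in the cell.
 */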
static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		no_space(cell);
		break;

	default:
		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
		dm_cell_error(cell);
		break;
	}
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result)
{
	struct dm_bio_prison_cell *cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (dm_bio_detain(pool->prison, &key, bio, &cell))
		return;

	if (bio_data_dir(bio) == WRITE && bio->bi_size)
		break_sharing(tc, bio, block, &key, lookup_result, cell);
	else {
		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);

		dm_cell_release_singleton(cell, bio);
		remap_and_issue(tc, bio, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_size) {
		dm_cell_release_singleton(cell, bio);
		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		dm_cell_release_singleton(cell, bio);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		no_space(cell);
		break;

	default:
		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
		set_pool_mode(tc->pool, PM_READ_ONLY);