/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#define	DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
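/*
 * Editor's note: with the usual 512-byte sectors (SECTOR_SHIFT == 9)
 * these evaluate to 128 sectors (64KB) and 2097152 sectors (1GB)
 * respectively.
 */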

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that data block in the snapshot device is shared
 * even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */
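/*
 * Worked example of the above (editor's sketch; the block numbers are
 * hypothetical): suppose the origin and a snapshot both map virtual
 * block 5 to data block 9.  A write to virtual block 5 of the origin
 * allocates a fresh data block, say 12, copies block 9 to block 12
 * (skipped if the write covers the whole block) and inserts the
 * mapping 5 -> 12 into the origin's btree only.  The snapshot's btree
 * still maps 5 -> 9, so the snapshot continues to see the pre-write
 * data.
 */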

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}
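/*
 * Editor's note: the virtual flag splits the prison key space in two.
 * Keys with virtual = 1 serialise I/O against a thin device's virtual
 * blocks, while keys with virtual = 0 serialise I/O against pool data
 * blocks, so cells for the two namespaces never collide.
 */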

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 3 modes, ordered from least to most degraded so that
 * modes can be compared.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};
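/*
 * Editor's note: the ordering above matters.  bind_control_target()
 * compares modes with '>' and keeps the more degraded of the current
 * and requested modes, so a failed or read-only pool is never silently
 * upgraded by a table reload.
 */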

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	unsigned low_water_triggered:1;	/* A dm event has been sent */
	unsigned no_free_space:1;	/* A -ENOSPC warning has been issued */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;

	struct bio_list retry_on_resume_list;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;
	mempool_t *endio_hook_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void set_pool_mode(struct pool *pool, enum pool_mode mode);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
};

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
};

static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, master);
	bio_list_init(master);

	while ((bio = bio_list_pop(&bios))) {
		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;

		if (h->tc == tc)
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_list_add(master, bio);
	}
}

static void requeue_io(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	__requeue_bio_list(tc, &pool->deferred_bios);
	__requeue_bio_list(tc, &pool->retry_on_resume_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	sector_t block_nr = bio->bi_sector;

	if (tc->pool->sectors_per_block_shift < 0)
		(void) sector_div(block_nr, tc->pool->sectors_per_block);
	else
		block_nr >>= tc->pool->sectors_per_block_shift;

	return block_nr;
}
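/*
 * Editor's note, with hypothetical numbers: for 64KB blocks,
 * sectors_per_block == 128 and sectors_per_block_shift == 7, so a bio
 * at sector 1000 falls in block 1000 >> 7 == 7.  The sector_div() path
 * covers pools whose block size is not a power of two, where
 * sectors_per_block_shift is negative and no shift exists.
 */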

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (tc->pool->sectors_per_block_shift < 0)
		bio->bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
	else
		bio->bi_sector = (block << pool->sectors_per_block_shift) |
				(bi_sector & (pool->sectors_per_block - 1));
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	unsigned quiesced:1;
	unsigned prepared:1;
	unsigned pass_discard:1;

	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;
	int err;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (m->quiesced && m->prepared) {
		list_add(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_thin_new_mapping *m = context;
	struct pool *pool = m->tc->pool;

	m->err = read_err || write_err ? -EIO : 0;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = 1;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void overwrite_endio(struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
	struct dm_thin_new_mapping *m = h->overwrite_mapping;
	struct pool *pool = m->tc->pool;

	m->err = err;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = 1;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
		       dm_block_t data_block)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	dm_cell_release(cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

/*
 * Same as cell_defer except it omits the original holder of the cell.
 */
static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct bio_list bios;
	struct pool *pool = tc->pool;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&pool->lock, flags);
	dm_cell_release_no_holder(cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio)
		m->bio->bi_end_io = m->saved_bi_end_io;
	dm_cell_error(m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio)
		bio->bi_end_io = m->saved_bi_end_io;

	if (m->err) {
		dm_cell_error(m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		DMERR("dm_thin_insert_block() failed");
		dm_cell_error(m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		cell_defer_except(tc, m->cell);
		bio_endio(bio, 0);
	} else
		cell_defer(tc, m->cell, m->data_block);

out:
	list_del(&m->list);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_except(tc, m->cell);
	cell_defer_except(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	if (m->pass_discard)
		remap_and_issue(tc, m->bio, m->data_block);
	else
		bio_endio(m->bio, 0);

	cell_defer_except(tc, m->cell);
	cell_defer_except(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}
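/*
 * Editor's note: when a WRITE bio covers a whole block,
 * schedule_copy() and schedule_zero() skip the kcopyd copy or zero and
 * issue the bio directly, with overwrite_endio() marking the mapping
 * prepared when the bio completes.
 */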

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *r = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	pool->next_mapping = NULL;

	return r;
}

static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	INIT_LIST_HEAD(&m->list);
	m->quiesced = 0;
	m->prepared = 0;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;
	m->err = 0;
	m->bio = NULL;

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		m->quiesced = 1;

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		remap_and_issue(tc, bio, data_dest);
	} else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = pool->sectors_per_block;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR("dm_kcopyd_copy() failed");
			dm_cell_error(cell);
		}
	}
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->origin_dev,
		      virt_block, data_dest, cell, bio);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	INIT_LIST_HEAD(&m->list);
	m->quiesced = 1;
	m->prepared = 0;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;
	m->err = 0;
	m->bio = NULL;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		remap_and_issue(tc, bio, data_block);
	} else {
		int r;
		struct dm_io_region to;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_block * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR("dm_kcopyd_zero() failed");
			dm_cell_error(cell);
		}
	}
}

static int commit(struct pool *pool)
{
	int r;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		DMERR("commit failed, error = %d", r);

	return r;
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit_or_fallback(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) != PM_WRITE)
		return -EINVAL;

	r = commit(pool);
	if (r)
		set_pool_mode(pool, PM_READ_ONLY);

	return r;
}
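/*
 * Editor's note: callers on the write path use commit_or_fallback()
 * rather than commit() so that a failed metadata commit degrades the
 * pool to read-only mode instead of going unnoticed.
 */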

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	unsigned long flags;
	struct pool *pool = tc->pool;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r)
		return r;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark, sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = 1;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}

	if (!free_blocks) {
		if (pool->no_free_space)
			return -ENOSPC;
		else {
			/*
			 * Try to commit to see if that will free up some
			 * more space.
			 */
			(void) commit_or_fallback(pool);

			r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
			if (r)
				return r;

			/*
			 * If we still have no space we set a flag to avoid
			 * doing all this checking and return -ENOSPC.
			 */
			if (!free_blocks) {
				DMWARN("%s: no free space available.",
				       dm_device_name(pool->pool_md));
				spin_lock_irqsave(&pool->lock, flags);
				pool->no_free_space = 1;
				spin_unlock_irqrestore(&pool->lock, flags);
				return -ENOSPC;
			}
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r)
		return r;

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
	struct thin_c *tc = h->tc;
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void no_space(struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	dm_cell_release(cell, &bios);

	while ((bio = bio_list_pop(&bios)))
		retry_on_resume(bio);
}

static void process_discard(struct thin_c *tc, struct bio *bio)
{
	int r;
	unsigned long flags;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell, *cell2;
	struct dm_cell_key key, key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	build_virtual_key(tc->td, block, &key);
	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block.  This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
			cell_defer_except(tc, cell);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block.  We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->err = 0;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
				spin_lock_irqsave(&pool->lock, flags);
				list_add(&m->list, &pool->prepared_discards);
				spin_unlock_irqrestore(&pool->lock, flags);
				wake_worker(pool);
			}
		} else {
			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary.  So we submit the discard of a
			 * partial block appropriately.
			 */
			cell_defer_except(tc, cell);
			cell_defer_except(tc, cell2);
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_defer_except(tc, cell);
		bio_endio(bio, 0);
		break;

	default:
		DMERR("discard: find block unexpectedly returned %d", r);
		cell_defer_except(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		no_space(cell);
		break;

	default:
		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
		dm_cell_error(cell);
		break;
	}
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result)
{
	struct dm_bio_prison_cell *cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (dm_bio_detain(pool->prison, &key, bio, &cell))
		return;

	if (bio_data_dir(bio) == WRITE && bio->bi_size)
		break_sharing(tc, bio, block, &key, lookup_result, cell);
	else {
		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);

		cell_defer_except(tc, cell);
		remap_and_issue(tc, bio, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_size) {
		cell_defer_except(tc, cell);
		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_except(tc, cell);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		no_space(cell);
		break;

	default:
		DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
		set_pool_mode(tc->pool, PM_READ_ONLY);
		dm_cell_error(cell);
		break;
	}
}

static void process_bio(struct thin_c *tc, struct bio *bio)
{
	int r;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;
	struct dm_thin_lookup_result lookup_result;

	/*
	 * If cell is already occupied, then the block is already
	 * being provisioned so we have nothing further to do here.
	 */
	build_virtual_key(tc->td, block, &key);
	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * We can release this cell now.  This thread is the only
		 * one that puts bios into a cell, and we know there were
		 * no preceding bios.
		 */
		/*
		 * TODO: this will probably have to change when discard goes
		 * back in.
		 */
		cell_defer_except(tc, cell);

		if (lookup_result.shared)
			process_shared_bio(tc, bio, block, &lookup_result);
		else
			remap_and_issue(tc, bio, lookup_result.block);
		break;

	case -ENODATA:
		if (bio_data_dir(bio) == READ && tc->origin_dev) {
			cell_defer_except(tc, cell);
			remap_to_origin_and_issue(tc, bio);
		} else
			provision_block(tc, bio, block, cell);
		break;

	default:
		DMERR("dm_thin_find_block() failed, error = %d", r);
		cell_defer_except(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
			bio_io_error(bio);
		else
			remap_and_issue(tc, bio, lookup_result.block);
		break;

	case -ENODATA:
		if (rw != READ) {
			bio_io_error(bio);
			break;
		}

		if (tc->origin_dev) {
			remap_to_origin_and_issue(tc, bio);
			break;
		}

		zero_fill_bio(bio);
		bio_endio(bio, 0);
		break;

	default:
		DMERR("dm_thin_find_block() failed, error = %d", r);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
	bio_io_error(bio);
}

static int need_commit_due_to_time(struct pool *pool)
{
	return jiffies < pool->last_commit_jiffies ||
	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
}
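/*
 * Editor's note: the first comparison forces a commit when the jiffies
 * counter has wrapped since last_commit_jiffies was recorded, instead
 * of waiting for the wrapped value to catch up again.
 */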

static void process_deferred_bios(struct pool *pool)
{
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_merge(&bios, &pool->deferred_bios);
	bio_list_init(&pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	while ((bio = bio_list_pop(&bios))) {
		struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
		struct thin_c *tc = h->tc;

		/*
		 * If we've got no free new_mapping structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (ensure_next_mapping(pool)) {
			spin_lock_irqsave(&pool->lock, flags);
			bio_list_merge(&pool->deferred_bios, &bios);
			spin_unlock_irqrestore(&pool->lock, flags);

			break;
		}

		if (bio->bi_rw & REQ_DISCARD)
			pool->process_discard(tc, bio);
		else
			pool->process_bio(tc, bio);
	}

	/*
	 * If there are any deferred flush bios, we must commit
	 * the metadata before issuing them.
	 */
	bio_list_init(&bios);
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_merge(&bios, &pool->deferred_flush_bios);
	bio_list_init(&pool->deferred_flush_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
		return;

	if (commit_or_fallback(pool)) {
		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);
		return;
	}
	pool->last_commit_jiffies = jiffies;

	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}

static void do_worker(struct work_struct *ws)
{
	struct pool *pool = container_of(ws, struct pool, worker);

	process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
	process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
	process_deferred_bios(pool);
}

/*
 * We want to commit periodically so that not too much
 * unwritten data builds up.
 */
static void do_waker(struct work_struct *ws)
{
	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
	wake_worker(pool);
	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}

/*----------------------------------------------------------------*/

static enum pool_mode get_pool_mode(struct pool *pool)
{
	return pool->pf.mode;
}

static void set_pool_mode(struct pool *pool, enum pool_mode mode)
{
	int r;

	pool->pf.mode = mode;

	switch (mode) {
	case PM_FAIL:
		DMERR("switching pool to failure mode");
		pool->process_bio = process_bio_fail;
		pool->process_discard = process_bio_fail;
		pool->process_prepared_mapping = process_prepared_mapping_fail;
		pool->process_prepared_discard = process_prepared_discard_fail;
		break;

	case PM_READ_ONLY:
		DMERR("switching pool to read-only mode");
		r = dm_pool_abort_metadata(pool->pmd);
		if (r) {
			DMERR("aborting transaction failed");
			set_pool_mode(pool, PM_FAIL);
		} else {
			dm_pool_metadata_read_only(pool->pmd);
			pool->process_bio = process_bio_read_only;
			pool->process_discard = process_discard;
			pool->process_prepared_mapping = process_prepared_mapping_fail;
			pool->process_prepared_discard = process_prepared_discard_passdown;
		}
		break;

	case PM_WRITE:
		pool->process_bio = process_bio;
		pool->process_discard = process_discard;
		pool->process_prepared_mapping = process_prepared_mapping;
		pool->process_prepared_discard = process_prepared_discard;
		break;
	}
}

/*----------------------------------------------------------------*/

/*
 * Mapping functions.
 */

/*
 * Called only while mapping a thin bio to hand it over to the workqueue.
 */
static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
{
	unsigned long flags;
	struct pool *pool = tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);

	h->tc = tc;
	h->shared_read_entry = NULL;
	h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds);
	h->overwrite_mapping = NULL;

	return h;
}

/*
 * Non-blocking function called from the thin target's map function.
 */
static int thin_bio_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	int r;
	struct thin_c *tc = ti->private;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_device *td = tc->td;
	struct dm_thin_lookup_result result;

	map_context->ptr = thin_hook_bio(tc, bio);

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
		thin_defer_bio(tc, bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = dm_thin_find_block(td, block, 0, &result);

	/*
	 * Note that we defer readahead too.
	 */
	switch (r) {
	case 0:
		if (unlikely(result.shared)) {
			/*
			 * We have a race condition here between the
			 * result.shared value returned by the lookup and
			 * snapshot creation, which may cause new
			 * sharing.
			 *
			 * To avoid this always quiesce the origin before
			 * taking the snap.  You want to do this anyway to
			 * ensure a consistent application view
			 * (i.e. lockfs).
			 *
			 * More distant ancestors are irrelevant. The
			 * shared flag will be set in their case.
			 */
			thin_defer_bio(tc, bio);
			r = DM_MAPIO_SUBMITTED;
		} else {
			remap(tc, bio, result.block);
			r = DM_MAPIO_REMAPPED;
		}
		break;

	case -ENODATA:
		if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
			/*
			 * This block isn't provisioned, and we have no way
			 * of doing so.  Just error it.
			 */
			bio_io_error(bio);
			r = DM_MAPIO_SUBMITTED;
			break;
		}
		/* fall through */

	case -EWOULDBLOCK:
		/*
		 * In future, the failed dm_thin_find_block above could
		 * provide the hint to load the metadata into cache.
		 */
		thin_defer_bio(tc, bio);
		r = DM_MAPIO_SUBMITTED;
		break;

	default:
		/*
		 * Must always call bio_io_error on failure.
		 * dm_thin_find_block can fail with -EINVAL if the
		 * pool is switched to fail-io mode.
		 */
		bio_io_error(bio);
		r = DM_MAPIO_SUBMITTED;
		break;
	}

	return r;
}

static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	int r;
	unsigned long flags;
	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);

	spin_lock_irqsave(&pt->pool->lock, flags);
	r = !bio_list_empty(&pt->pool->retry_on_resume_list);
	spin_unlock_irqrestore(&pt->pool->lock, flags);

	if (!r) {
		struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
		r = bdi_congested(&q->backing_dev_info, bdi_bits);
	}

	return r;
}

static void __requeue_bios(struct pool *pool)
{
	bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
	bio_list_init(&pool->retry_on_resume_list);
}

/*----------------------------------------------------------------
 * Binding of control targets to a pool object
 *--------------------------------------------------------------*/
static bool data_dev_supports_discard(struct pool_c *pt)
{
	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);

	return q && blk_queue_discard(q);
}

/*
 * If discard_passdown was enabled verify that the data device
 * supports discards.  Disable discard_passdown if not.
 */
static void disable_passdown_if_not_supported(struct pool_c *pt)
{
	struct pool *pool = pt->pool;
	struct block_device *data_bdev = pt->data_dev->bdev;
	struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
	sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
	const char *reason = NULL;
	char buf[BDEVNAME_SIZE];

	if (!pt->adjusted_pf.discard_passdown)
		return;

	if (!data_dev_supports_discard(pt))
		reason = "discard unsupported";

	else if (data_limits->max_discard_sectors < pool->sectors_per_block)
		reason = "max discard sectors smaller than a block";

	else if (data_limits->discard_granularity > block_size)
		reason = "discard granularity larger than a block";

	else if (block_size & (data_limits->discard_granularity - 1))
		reason = "discard granularity not a factor of block size";

	if (reason) {
		DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
		pt->adjusted_pf.discard_passdown = false;
	}
}

static int bind_control_target(struct pool *pool, struct dm_target *ti)
{
	struct pool_c *pt = ti->private;

	/*
	 * We want to make sure that degraded pools are never upgraded.
	 */
	enum pool_mode old_mode = pool->pf.mode;
	enum pool_mode new_mode = pt->adjusted_pf.mode;

	if (old_mode > new_mode)
		new_mode = old_mode;

	pool->ti = ti;
	pool->low_water_blocks = pt->low_water_blocks;
	pool->pf = pt->adjusted_pf;

	set_pool_mode(pool, new_mode);

	return 0;
}

static void unbind_control_target(struct pool *pool, struct dm_target *ti)
{
	if (pool->ti == ti)
		pool->ti = NULL;
}

/*----------------------------------------------------------------
 * Pool creation
 *--------------------------------------------------------------*/
/* Initialize pool features. */
static void pool_features_init(struct pool_features *pf)
{
	pf->mode = PM_WRITE;
	pf->zero_new_blocks = true;
	pf->discard_enabled = true;
	pf->discard_passdown = true;
}

static void __pool_destroy(struct pool *pool)
{
	__pool_table_remove(pool);

	if (dm_pool_metadata_close(pool->pmd) < 0)
		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);

	dm_bio_prison_destroy(pool->prison);
	dm_kcopyd_client_destroy(pool->copier);

	if (pool->wq)
		destroy_workqueue(pool->wq);

	if (pool->next_mapping)
		mempool_free(pool->next_mapping, pool->mapping_pool);
	mempool_destroy(pool->mapping_pool);
	mempool_destroy(pool->endio_hook_pool);
	dm_deferred_set_destroy(pool->shared_read_ds);
	dm_deferred_set_destroy(pool->all_io_ds);
	kfree(pool);
}
