/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
};

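/*
 * Scatterlists for one single-sector request; allocated behind the
 * ablkcipher request in each element of the request mempool (see the
 * layout comment in struct crypt_config).
 */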
struct dm_crypt_request {
	struct scatterlist sg_in;
	struct scatterlist sg_out;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;
	wait_queue_head_t writeq;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct crypto_cipher *essiv_tfm;
		int benbi_shift;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;
	struct ablkcipher_request *req;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_ablkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private.essiv_tfm = essiv_tfm;
	return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

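/*
 * Encrypt or decrypt a single 512-byte sector: map the current input and
 * output bio_vec entries into one-entry scatterlists, advance the context
 * offsets/indexes to the next sector, generate the IV and hand the request
 * to the ablkcipher, which may complete synchronously or asynchronously.
 */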
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
			 crypto_ablkcipher_alignmask(cc->tfm) + 1);

	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);
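/*
 * Set up the (pre)allocated crypto request for the next sector.  cc->req
 * is reused while requests complete synchronously; it is only set to NULL,
 * and therefore reallocated from req_pool, after a request has been handed
 * to the crypto layer asynchronously.
 */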
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (!cc->req)
		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
	ablkcipher_request_set_tfm(cc->req, cc->tfm);
	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					     CRYPTO_TFM_REQ_MAY_SLEEP,
					     kcryptd_async_done, ctx);
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->pending, 1);

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through*/
		case -EINPROGRESS:
			cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

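/*
 * Per-bio state is allocated from io_pool here and released again by
 * crypt_dec_pending() once the last reference to the io is dropped.
 */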
static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	atomic_set(&io->pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	if (!atomic_dec_and_test(&io->pending))
		return;

	bio_endio(io->base_bio, io->error);
	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
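/*
 * Completion handler for bios cloned by clone_init(): write completions
 * release the encrypted pages and drop the io reference, successful read
 * completions are queued to kcryptd for decryption.
 */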
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	crypt_inc_pending(io);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		io->error = -ENOMEM;
		crypt_dec_pending(io);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	generic_make_request(clone);
	wake_up(&cc->writeq);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_io_read(io);
	else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

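/*
 * Finish one encrypted fragment of a write bio: on error free the clone's
 * pages and drop the io reference; otherwise set the clone's start sector
 * and submit it, via the io workqueue when called from the async
 * completion path or directly otherwise.
 */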
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;
	io->sector += bio_sectors(clone);

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	int crypt_finished;
	unsigned remaining = io->base_bio->bi_size;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;

		crypt_inc_pending(io);
		r = crypt_convert(cc, &io->ctx);
		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, r, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;
		}

		/* out of memory -> run queues */
		if (unlikely(remaining)) {
			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
			congestion_wait(WRITE, HZ/100);
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}

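/*
 * Completion callback for asynchronous crypto requests.  -EINPROGRESS only
 * means a previously backlogged request has been accepted, so wake up the
 * waiter in crypt_convert() and keep going; any other status means this
 * sector is done and the request can be returned to the mempool.
 */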
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct convert_context *ctx = async_req->data;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	mempool_free(ablkcipher_request_cast(async_req), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

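/*
 * Decode the hex key supplied on the mapping table line; a key of "-"
 * denotes an empty key.  DM_CRYPT_KEY_VALID is only set after the key
 * has been decoded successfully.
 */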
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	   (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_ablkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad_cipher;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad_cipher;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad_cipher;
	}

	tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad_cipher;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad_ivmode;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad_ivmode;

	cc->iv_size = crypto_ablkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad_slab_pool;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(tfm);
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) &
			   ~(crypto_tfm_ctx_alignment() - 1);

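	/*
	 * Size each request mempool element to hold the ablkcipher request
	 * plus its context (dmreq_start bytes), followed by the
	 * dm_crypt_request and the IV, matching the layout described in
	 * struct crypt_config.
	 */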
	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad_req_pool;
	}
	cc->req = NULL;

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad_page_pool;
	}

	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

	if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad_device;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad_device;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad_device;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad_device;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad_ivmode_string;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad_io_queue;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad_crypt_queue;
	}

	init_waitqueue_head(&cc->writeq);
	ti->private = cc;
	return 0;

bad_crypt_queue:
	destroy_workqueue(cc->io_queue);
bad_io_queue:
	kfree(cc->iv_mode);
bad_ivmode_string:
	dm_put_device(ti, cc->dev);
bad_device:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad_page_pool:
	mempool_destroy(cc->req_pool);
bad_req_pool:
	mempool_destroy(cc->io_pool);
bad_slab_pool:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad_ivmode:
	crypto_free_ablkcipher(tfm);