dm-crypt.c 25.6 KB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
1
2
3
/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
Milan Broz's avatar
Milan Broz committed
4
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
Linus Torvalds's avatar
Linus Torvalds committed
5
6
7
8
 *
 * This file is released under the GPL.
 */

9
#include <linux/err.h>
Linus Torvalds's avatar
Linus Torvalds committed
10
11
12
13
14
15
16
17
18
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
19
#include <linux/backing-dev.h>
Linus Torvalds's avatar
Linus Torvalds committed
20
#include <asm/atomic.h>
21
#include <linux/scatterlist.h>
Linus Torvalds's avatar
Linus Torvalds committed
22
#include <asm/page.h>
23
#include <asm/unaligned.h>
Linus Torvalds's avatar
Linus Torvalds committed
24
25
26

#include "dm.h"

27
#define DM_MSG_PREFIX "crypt"
Milan Broz's avatar
Milan Broz committed
28
#define MESG_STR(x) x, sizeof(x)
Linus Torvalds's avatar
Linus Torvalds committed
29
30
31
32

/*
 * per bio private data
 */
Alasdair G Kergon's avatar
Alasdair G Kergon committed
33
struct dm_crypt_io {
Linus Torvalds's avatar
Linus Torvalds committed
34
	struct dm_target *target;
35
	struct bio *base_bio;
Linus Torvalds's avatar
Linus Torvalds committed
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
	struct work_struct work;
	atomic_t pending;
	int error;
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;		/* byte offset inside the current input bvec */
	unsigned int offset_out;	/* byte offset inside the current output bvec */
	unsigned int idx_in;		/* current bvec index in bio_in */
	unsigned int idx_out;		/* current bvec index in bio_out */
	sector_t sector;		/* IV sector, already shifted by cc->iv_offset */
	int write;			/* non-zero: encrypt; zero: decrypt */
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
Milan Broz's avatar
Milan Broz committed
59
		   const char *opts);
Linus Torvalds's avatar
Linus Torvalds committed
60
61
62
63
64
65
66
67
68
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
Milan Broz's avatar
Milan Broz committed
69
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
Linus Torvalds's avatar
Linus Torvalds committed
70
71
72
73
74
75
76
77
78
79
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;
80
	struct bio_set *bs;
Linus Torvalds's avatar
Linus Torvalds committed
81

82
83
	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;
Linus Torvalds's avatar
Linus Torvalds committed
84
85
86
87
88
	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
89
90
91
92
	union {
		struct crypto_cipher *essiv_tfm;
		int benbi_shift;
	} iv_gen_private;
Linus Torvalds's avatar
Linus Torvalds committed
93
94
95
	sector_t iv_offset;
	unsigned int iv_size;

96
97
98
	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_blkcipher *tfm;
Milan Broz's avatar
Milan Broz committed
99
	unsigned long flags;
Linus Torvalds's avatar
Linus Torvalds committed
100
101
102
103
	unsigned int key_size;
	u8 key[0];
};

104
/* Minimum reserved objects so I/O can make forward progress under pressure. */
#define MIN_IOS        16	/* dm_crypt_io structs and clone bios */
#define MIN_POOL_PAGES 32	/* encryption buffer pages */
#define MIN_BIO_PAGES  8	/* pages allocated with GFP_NOIO before switching
				 * to non-waiting allocation (see crypt_alloc_buffer) */

/* Slab cache backing the per-bio dm_crypt_io mempool. */
static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
Olaf Kirch's avatar
Olaf Kirch committed
111

Linus Torvalds's avatar
Linus Torvalds committed
112
113
114
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

/* Generate a "plain" IV: low 32 bits of the sector, little-endian, zero-padded. */
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
Milan Broz's avatar
Milan Broz committed
141
			      const char *opts)
Linus Torvalds's avatar
Linus Torvalds committed
142
{
143
	struct crypto_cipher *essiv_tfm;
144
145
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
Linus Torvalds's avatar
Linus Torvalds committed
146
147
148
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
149
	int err;
Linus Torvalds's avatar
Linus Torvalds committed
150
151

	if (opts == NULL) {
152
		ti->error = "Digest algorithm missing for ESSIV mode";
Linus Torvalds's avatar
Linus Torvalds committed
153
154
155
156
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
157
158
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
159
		ti->error = "Error initializing ESSIV hash";
160
		return PTR_ERR(hash_tfm);
Linus Torvalds's avatar
Linus Torvalds committed
161
162
	}

163
	saltsize = crypto_hash_digestsize(hash_tfm);
Linus Torvalds's avatar
Linus Torvalds committed
164
165
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
166
		ti->error = "Error kmallocing salt storage in ESSIV";
167
		crypto_free_hash(hash_tfm);
Linus Torvalds's avatar
Linus Torvalds committed
168
169
170
		return -ENOMEM;
	}

171
	sg_set_buf(&sg, cc->key, cc->key_size);
172
173
174
175
176
177
178
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
179
		kfree(salt);
180
181
		return err;
	}
Linus Torvalds's avatar
Linus Torvalds committed
182
183

	/* Setup the essiv_tfm with the given salt */
184
185
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
186
		ti->error = "Error allocating crypto tfm for ESSIV";
Linus Torvalds's avatar
Linus Torvalds committed
187
		kfree(salt);
188
		return PTR_ERR(essiv_tfm);
Linus Torvalds's avatar
Linus Torvalds committed
189
	}
190
191
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_blkcipher_ivsize(cc->tfm)) {
192
		ti->error = "Block size of ESSIV cipher does "
Milan Broz's avatar
Milan Broz committed
193
			    "not match IV size of block cipher";
194
		crypto_free_cipher(essiv_tfm);
Linus Torvalds's avatar
Linus Torvalds committed
195
196
197
		kfree(salt);
		return -EINVAL;
	}
198
199
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
200
		ti->error = "Failed to set key for ESSIV cipher";
201
		crypto_free_cipher(essiv_tfm);
Linus Torvalds's avatar
Linus Torvalds committed
202
		kfree(salt);
203
		return err;
Linus Torvalds's avatar
Linus Torvalds committed
204
205
206
	}
	kfree(salt);

207
	cc->iv_gen_private.essiv_tfm = essiv_tfm;
Linus Torvalds's avatar
Linus Torvalds committed
208
209
210
211
212
	return 0;
}

/* Tear down the ESSIV cipher allocated by crypt_iv_essiv_ctr(). */
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}

/* ESSIV IV: encrypt the little-endian 64-bit sector number with the salt key. */
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}

225
226
227
228
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
229
	int log = ilog2(bs);
230
231
232
233
234
235
236
237
238
239
240
241
242
243

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

244
	cc->iv_gen_private.benbi_shift = 9 - log;
245
246
247
248
249
250
251
252
253
254

	return 0;
}

/* benbi keeps no constructor state, so nothing to tear down. */
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

/* benbi IV: big-endian narrow-block count (starting at 1) in the IV's tail. */
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

Ludwig Nussel's avatar
Ludwig Nussel committed
265
266
267
268
269
270
271
/* null IV: always all-zero (loop_fish2 compatibility). */
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

Linus Torvalds's avatar
Linus Torvalds committed
272
273
274
275
276
277
278
279
280
281
static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

282
283
284
285
286
static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};
Linus Torvalds's avatar
Linus Torvalds committed
287

Ludwig Nussel's avatar
Ludwig Nussel committed
288
289
290
291
static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

292
static int
Linus Torvalds's avatar
Linus Torvalds committed
293
294
295
296
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
297
	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
298
299
300
301
302
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
Linus Torvalds's avatar
Linus Torvalds committed
303
304
305
306
307
308
309
310
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
311
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
Linus Torvalds's avatar
Linus Torvalds committed
312
		else
313
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
Linus Torvalds's avatar
Linus Torvalds committed
314
315
	} else {
		if (write)
316
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
Linus Torvalds's avatar
Linus Torvalds committed
317
		else
318
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
Linus Torvalds's avatar
Linus Torvalds committed
319
320
321
322
323
	}

	return r;
}

Milan Broz's avatar
Milan Broz committed
324
325
326
327
/*
 * Prime a convert_context for a (possibly multi-part) conversion.
 * Either bio may be NULL; its cursor then starts at index 0 and the
 * caller fills it in later (see process_write()).
 */
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector, int write)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	/* IV sector is offset by the table-specified iv_offset. */
	ctx->sector = sector + cc->iv_offset;
	ctx->write = write;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
Milan Broz's avatar
Milan Broz committed
343
			 struct convert_context *ctx)
Linus Torvalds's avatar
Linus Torvalds committed
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in = {
			.page = bv_in->bv_page,
			.offset = bv_in->bv_offset + ctx->offset_in,
			.length = 1 << SECTOR_SHIFT
		};
		struct scatterlist sg_out = {
			.page = bv_out->bv_page,
			.offset = bv_out->bv_offset + ctx->offset_out,
			.length = 1 << SECTOR_SHIFT
		};

		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
Milan Broz's avatar
Milan Broz committed
375
					      ctx->write, ctx->sector);
Linus Torvalds's avatar
Linus Torvalds committed
376
377
378
379
380
381
382
383
384
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}

Milan Broz's avatar
Milan Broz committed
385
386
static void dm_crypt_bio_destructor(struct bio *bio)
{
Alasdair G Kergon's avatar
Alasdair G Kergon committed
387
	struct dm_crypt_io *io = bio->bi_private;
388
389
390
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
Milan Broz's avatar
Milan Broz committed
391
}
392

Linus Torvalds's avatar
Linus Torvalds committed
393
394
395
396
397
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
Alasdair G Kergon's avatar
Alasdair G Kergon committed
398
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
Linus Torvalds's avatar
Linus Torvalds committed
399
{
Olaf Kirch's avatar
Olaf Kirch committed
400
	struct crypt_config *cc = io->target->private;
401
	struct bio *clone;
Linus Torvalds's avatar
Linus Torvalds committed
402
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
403
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
Linus Torvalds's avatar
Linus Torvalds committed
404
405
	unsigned int i;

Olaf Kirch's avatar
Olaf Kirch committed
406
	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
407
	if (!clone)
Linus Torvalds's avatar
Linus Torvalds committed
408
409
		return NULL;

Olaf Kirch's avatar
Olaf Kirch committed
410
	clone_init(io, clone);
411

412
	for (i = 0; i < nr_iovecs; i++) {
413
		struct bio_vec *bv = bio_iovec_idx(clone, i);
Linus Torvalds's avatar
Linus Torvalds committed
414
415
416
417
418
419
420
421
422
423

		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!bv->bv_page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
424
		if (i == (MIN_BIO_PAGES - 1))
Linus Torvalds's avatar
Linus Torvalds committed
425
426
427
428
429
430
431
432
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		bv->bv_offset = 0;
		if (size > PAGE_SIZE)
			bv->bv_len = PAGE_SIZE;
		else
			bv->bv_len = size;

433
434
		clone->bi_size += bv->bv_len;
		clone->bi_vcnt++;
Linus Torvalds's avatar
Linus Torvalds committed
435
436
437
		size -= bv->bv_len;
	}

438
439
	if (!clone->bi_size) {
		bio_put(clone);
Linus Torvalds's avatar
Linus Torvalds committed
440
441
442
		return NULL;
	}

443
	return clone;
Linus Torvalds's avatar
Linus Torvalds committed
444
445
}

Neil Brown's avatar
Neil Brown committed
446
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
Linus Torvalds's avatar
Linus Torvalds committed
447
{
Neil Brown's avatar
Neil Brown committed
448
	unsigned int i;
Linus Torvalds's avatar
Linus Torvalds committed
449
450
	struct bio_vec *bv;

Neil Brown's avatar
Neil Brown committed
451
	for (i = 0; i < clone->bi_vcnt; i++) {
452
		bv = bio_iovec_idx(clone, i);
Linus Torvalds's avatar
Linus Torvalds committed
453
454
455
456
457
458
459
460
461
462
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
Alasdair G Kergon's avatar
Alasdair G Kergon committed
463
static void dec_pending(struct dm_crypt_io *io, int error)
Linus Torvalds's avatar
Linus Torvalds committed
464
465
466
467
468
469
470
471
472
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (error < 0)
		io->error = error;

	if (!atomic_dec_and_test(&io->pending))
		return;

473
	bio_endio(io->base_bio, io->error);
Linus Torvalds's avatar
Linus Torvalds committed
474
475
476
477
478

	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
David Howells's avatar
David Howells committed
492
static void kcryptd_do_work(struct work_struct *work);
493
static void kcryptd_do_crypt(struct work_struct *work);
Linus Torvalds's avatar
Linus Torvalds committed
494

Alasdair G Kergon's avatar
Alasdair G Kergon committed
495
static void kcryptd_queue_io(struct dm_crypt_io *io)
Linus Torvalds's avatar
Linus Torvalds committed
496
{
497
498
	struct crypt_config *cc = io->target->private;

David Howells's avatar
David Howells committed
499
	INIT_WORK(&io->work, kcryptd_do_work);
500
501
502
503
504
505
506
507
508
	queue_work(cc->io_queue, &io->work);
}

/* Hand this io to the encryption workqueue (runs kcryptd_do_crypt). */
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_do_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

511
static void crypt_endio(struct bio *clone, int error)
512
{
Alasdair G Kergon's avatar
Alasdair G Kergon committed
513
	struct dm_crypt_io *io = clone->bi_private;
514
515
516
517
	struct crypt_config *cc = io->target->private;
	unsigned read_io = bio_data_dir(clone) == READ;

	/*
518
	 * free the processed pages
519
	 */
520
	if (!read_io) {
Neil Brown's avatar
Neil Brown committed
521
		crypt_free_buffer_pages(cc, clone);
522
		goto out;
523
	}
524
525
526
527
528
529
530

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
		error = -EIO;
		goto out;
	}

	bio_put(clone);
531
	kcryptd_queue_crypt(io);
532
	return;
533
534
535
536
537
538

out:
	bio_put(clone);
	dec_pending(io, error);
}

Alasdair G Kergon's avatar
Alasdair G Kergon committed
539
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
540
541
542
543
544
545
546
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
Olaf Kirch's avatar
Olaf Kirch committed
547
	clone->bi_destructor = dm_crypt_bio_destructor;
548
549
}

Alasdair G Kergon's avatar
Alasdair G Kergon committed
550
static void process_read(struct dm_crypt_io *io)
551
552
553
554
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
555
556
557
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);
558
559
560
561
562
563

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
564
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
565
566
	if (unlikely(!clone)) {
		dec_pending(io, -ENOMEM);
567
		return;
568
	}
569
570
571
572
573

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
574
	clone->bi_sector = cc->start + sector;
575
576
577
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

578
	generic_make_request(clone);
579
580
}

Alasdair G Kergon's avatar
Alasdair G Kergon committed
581
static void process_write(struct dm_crypt_io *io)
582
583
584
585
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
586
587
588
	struct convert_context ctx;
	unsigned remaining = base_bio->bi_size;
	sector_t sector = base_bio->bi_sector - io->target->begin;
589

590
	atomic_inc(&io->pending);
591

592
	crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);
593

594
595
596
597
598
	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
599
		clone = crypt_alloc_buffer(io, remaining);
600
601
602
603
		if (unlikely(!clone)) {
			dec_pending(io, -ENOMEM);
			return;
		}
604
605

		ctx.bio_out = clone;
606
		ctx.idx_out = 0;
607
608

		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
Neil Brown's avatar
Neil Brown committed
609
			crypt_free_buffer_pages(cc, clone);
610
			bio_put(clone);
611
612
			dec_pending(io, -EIO);
			return;
613
614
		}

615
616
617
		/* crypt_convert should have filled the clone bio */
		BUG_ON(ctx.idx_out < clone->bi_vcnt);

618
619
620
621
		clone->bi_sector = cc->start + sector;
		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

Olaf Kirch's avatar
Olaf Kirch committed
622
623
		/* Grab another reference to the io struct
		 * before we kick off the request */
624
625
626
		if (remaining)
			atomic_inc(&io->pending);

627
628
		generic_make_request(clone);

629
630
631
		/* Do not reference clone after this - it
		 * may be gone already. */

632
633
		/* out of memory -> run queues */
		if (remaining)
634
			congestion_wait(WRITE, HZ/100);
635
	}
636
637
}

Alasdair G Kergon's avatar
Alasdair G Kergon committed
638
static void process_read_endio(struct dm_crypt_io *io)
639
640
{
	struct crypt_config *cc = io->target->private;
Linus Torvalds's avatar
Linus Torvalds committed
641
642
	struct convert_context ctx;

643
644
	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
			   io->base_bio->bi_sector - io->target->begin, 0);
Linus Torvalds's avatar
Linus Torvalds committed
645

646
	dec_pending(io, crypt_convert(cc, &ctx));
Linus Torvalds's avatar
Linus Torvalds committed
647
648
}

David Howells's avatar
David Howells committed
649
static void kcryptd_do_work(struct work_struct *work)
Linus Torvalds's avatar
Linus Torvalds committed
650
{
Alasdair G Kergon's avatar
Alasdair G Kergon committed
651
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
652

653
	if (bio_data_dir(io->base_bio) == READ)
654
		process_read(io);
655
656
657
658
659
660
661
662
}

/* crypt_queue worker: decrypt finished reads, encrypt+submit writes. */
static void kcryptd_do_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		process_read_endio(io);
	else
		process_write(io);
}

/*
 * Decode key from its hex representation
 */
/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char pair[3];
	char *endp;
	unsigned int i;

	pair[2] = '\0';

	/* Consume two hex digits per output byte. */
	for (i = 0; i < size; i++) {
		pair[0] = *hex++;
		pair[1] = *hex++;

		key[i] = (u8)simple_strtoul(pair, &endp, 16);

		/* Both characters must have parsed as hex digits. */
		if (endp != &pair[2])
			return -EINVAL;
	}

	/* Reject trailing characters beyond 2*size digits. */
	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	/* Emits 2*size characters plus a trailing NUL from the last sprintf. */
	for (i = 0; i < size; i++, hex += 2)
		sprintf(hex, "%02x", key[i]);
}

Milan Broz's avatar
Milan Broz committed
708
709
710
711
712
713
714
715
716
717
/*
 * Install a new key from its hex string.  "-" means an empty key.
 * The key size is fixed by the first successful call.
 */
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	/* Once set, the key size may not change. */
	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	   (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}

/* Invalidate and zeroize the key material (message "key wipe"). */
static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	/* NOTE(review): plain memset for secret wiping can in principle be
	 * elided by the compiler; kernel memset is an out-of-line call here,
	 * but a barrier-protected wipe would be more defensive. */
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}

Linus Torvalds's avatar
Linus Torvalds committed
733
734
735
736
737
738
739
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
740
	struct crypto_blkcipher *tfm;
Linus Torvalds's avatar
Linus Torvalds committed
741
742
743
744
745
746
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
747
	unsigned long long tmpll;
Linus Torvalds's avatar
Linus Torvalds committed
748
749

	if (argc != 5) {
750
		ti->error = "Not enough arguments";
Linus Torvalds's avatar
Linus Torvalds committed
751
752
753
754
755
756
757
758
759
760
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
761
		DMWARN("Unexpected additional cipher options");
Linus Torvalds's avatar
Linus Torvalds committed
762
763
764

	key_size = strlen(argv[1]) >> 1;

Milan Broz's avatar
Milan Broz committed
765
 	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
Linus Torvalds's avatar
Linus Torvalds committed
766
767
	if (cc == NULL) {
		ti->error =
768
			"Cannot allocate transparent encryption context";
Linus Torvalds's avatar
Linus Torvalds committed
769
770
771
		return -ENOMEM;
	}

Milan Broz's avatar
Milan Broz committed
772
 	if (crypt_set_key(cc, argv[1])) {
773
		ti->error = "Error decoding key";
Milan Broz's avatar
Milan Broz committed
774
		goto bad_cipher;
Linus Torvalds's avatar
Linus Torvalds committed
775
776
777
778
779
780
781
782
	}

	/* Compatiblity mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

783
784
	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
Milan Broz's avatar
Milan Broz committed
785
		goto bad_cipher;
Linus Torvalds's avatar
Linus Torvalds committed
786
787
	}

Milan Broz's avatar
Milan Broz committed
788
789
	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
790
		ti->error = "Chain mode + cipher name is too long";
Milan Broz's avatar
Milan Broz committed
791
		goto bad_cipher;
Linus Torvalds's avatar
Linus Torvalds committed
792
793
	}

794
795
	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
796
		ti->error = "Error allocating crypto tfm";
Milan Broz's avatar
Milan Broz committed
797
		goto bad_cipher;
Linus Torvalds's avatar
Linus Torvalds committed
798
799
	}

800
801
	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
Linus Torvalds's avatar
Linus Torvalds committed
802
803
804
	cc->tfm = tfm;

	/*
805
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
Linus Torvalds's avatar
Linus Torvalds committed
806
807
808
809
810
811
812
813
814
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
815
816
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
Ludwig Nussel's avatar
Ludwig Nussel committed
817
818
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
Linus Torvalds's avatar
Linus Torvalds committed
819
	else {
820
		ti->error = "Invalid IV mode";
Milan Broz's avatar
Milan Broz committed
821
		goto bad_ivmode;
Linus Torvalds's avatar
Linus Torvalds committed
822
823
824
825
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
Milan Broz's avatar
Milan Broz committed
826
		goto bad_ivmode;
Linus Torvalds's avatar
Linus Torvalds committed
827

828
829
	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
Linus Torvalds's avatar
Linus Torvalds committed
830
		/* at least a 64 bit sector number should fit in our buffer */
831
		cc->iv_size = max(cc->iv_size,
Milan Broz's avatar
Milan Broz committed
832
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
Linus Torvalds's avatar
Linus Torvalds committed
833
834
	else {
		if (cc->iv_gen_ops) {
835
			DMWARN("Selected cipher does not support IVs");
Linus Torvalds's avatar
Linus Torvalds committed
836
837
838
839
840
841
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

842
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
Linus Torvalds's avatar
Linus Torvalds committed
843
	if (!cc->io_pool) {
844
		ti->error = "Cannot allocate crypt io mempool";
Milan Broz's avatar
Milan Broz committed
845
		goto bad_slab_pool;
Linus Torvalds's avatar
Linus Torvalds committed
846
847
	}

848
	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
Linus Torvalds's avatar
Linus Torvalds committed
849
	if (!cc->page_pool) {
850
		ti->error = "Cannot allocate page mempool";
Milan Broz's avatar
Milan Broz committed
851
		goto bad_page_pool;
Linus Torvalds's avatar
Linus Torvalds committed
852
853
	}

854
	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
855
856
857
858
859
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

860
	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
861
		ti->error = "Error setting key";
Milan Broz's avatar
Milan Broz committed
862
		goto bad_device;
Linus Torvalds's avatar
Linus Torvalds committed
863
864
	}

865
	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
866
		ti->error = "Invalid iv_offset sector";
Milan Broz's avatar
Milan Broz committed
867
		goto bad_device;
Linus Torvalds's avatar
Linus Torvalds committed
868
	}
869
	cc->iv_offset = tmpll;
Linus Torvalds's avatar
Linus Torvalds committed
870

871
	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
872
		ti->error = "Invalid device sector";
Milan Broz's avatar
Milan Broz committed
873
		goto bad_device;
Linus Torvalds's avatar
Linus Torvalds committed
874
	}
875
	cc->start = tmpll;
Linus Torvalds's avatar
Linus Torvalds committed
876
877

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
Milan Broz's avatar
Milan Broz committed
878
			  dm_table_get_mode(ti->table), &cc->dev)) {
879
		ti->error = "Device lookup failed";
Milan Broz's avatar
Milan Broz committed
880
		goto bad_device;
Linus Torvalds's avatar
Linus Torvalds committed
881
882
883
884
885
886
887
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
888
			ti->error = "Error kmallocing iv_mode string";
Milan Broz's avatar
Milan Broz committed
889
			goto bad_ivmode_string;
Linus Torvalds's avatar
Linus Torvalds committed
890
891
892
893
894
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

895
896
897
898
899
900
901
902
	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad_io_queue;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
903
		ti->error = "Couldn't create kcryptd queue";
904
		goto bad_crypt_queue;
905
906
	}

Linus Torvalds's avatar
Linus Torvalds committed
907
908
909
	ti->private = cc;
	return 0;

910
911
912
bad_crypt_queue:
	destroy_workqueue(cc->io_queue);
bad_io_queue:
913
	kfree(cc->iv_mode);
Milan Broz's avatar
Milan Broz committed
914
bad_ivmode_string:
915
	dm_put_device(ti, cc->dev);
Milan Broz's avatar
Milan Broz committed
916
bad_device:
917
918
	bioset_free(cc->bs);
bad_bs:
Linus Torvalds's avatar
Linus Torvalds committed
919
	mempool_destroy(cc->page_pool);
Milan Broz's avatar
Milan Broz committed
920
bad_page_pool:
Linus Torvalds's avatar
Linus Torvalds committed
921
	mempool_destroy(cc->io_pool);
Milan Broz's avatar
Milan Broz committed
922
bad_slab_pool:
Linus Torvalds's avatar
Linus Torvalds committed
923
924
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
Milan Broz's avatar
Milan Broz committed
925
bad_ivmode:
926
	crypto_free_blkcipher(tfm);
Milan Broz's avatar
Milan Broz committed
927
bad_cipher:
928
929
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
Linus Torvalds's avatar
Linus Torvalds committed
930
931
932
933
934
935
936
937
	kfree(cc);
	return -EINVAL;
}

/*
 * Tear down a crypt target: stop deferred work, release the memory
 * pools and bioset, drop the cipher and IV generator, put the
 * underlying device, and finally wipe and free the configuration.
 */
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	/* Flush and destroy the workqueues first so no deferred work can
	 * touch the pools or the cipher after they are freed below. */
	destroy_workqueue(cc->io_queue);
	destroy_workqueue(cc->crypt_queue);

	bioset_free(cc->bs);

	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	/* IV generator destructor is optional; only some IV modes have one. */
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_blkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}

/*
 * Map a bio onto this crypt target: wrap it in a dm_crypt_io and hand
 * it to the appropriate workqueue.  Reads go to the io queue (the data
 * must be read before it can be decrypted); writes go straight to the
 * crypt queue to be encrypted first.
 *
 * Always returns DM_MAPIO_SUBMITTED: completion is signalled
 * asynchronously from the workqueues.
 */
static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	/* GFP_NOIO: we are on the I/O path, must not recurse into I/O. */
	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->error = 0;
	atomic_set(&io->pending, 0);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

/*
 * Report target status.  STATUSTYPE_INFO is empty; STATUSTYPE_TABLE
 * emits the constructor parameters back:
 *	<cipher>-<chainmode>[-<ivmode>] <hexkey|-> <iv_offset> <device> <start>
 * Returns 0 on success or -ENOMEM if the result buffer is too small
 * for the hex-encoded key.
 */
static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		/* IV mode is optional; omit the third component if unset. */
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			/* Two hex chars per key byte plus the trailing NUL. */
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			/* No key: emit a single '-' placeholder. */
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

Milan Broz's avatar
Milan Broz committed
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
/*
 * Mark the target suspended so key manipulation (crypt_message) is
 * permitted while no I/O is in flight.
 */
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/*
 * Refuse to resume until a valid key has been set (e.g. after a
 * "key wipe" message); returns -EAGAIN so the resume can be retried.
 */
static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (test_bit(DM_CRYPT_KEY_VALID, &cc->flags))
		return 0;

	DMERR("aborting resume - crypt key is not set.");
	return -EAGAIN;
}

/*
 * Clear the suspended flag on resume; key manipulation messages are
 * rejected again from this point on.
 */
static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		/* Key changes are only allowed while suspended, so no
		 * in-flight I/O can be using the old key. */
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

/* Device-mapper target registration: ties the "crypt" target name to
 * its constructor, destructor, map and control callbacks. */
static struct target_type crypt_target = {
	.name   = "crypt",
	.version= {1, 5, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
};

/*
 * Module init: create the dm_crypt_io slab cache, then register the
 * crypt target.  The cache is destroyed again if registration fails.
 * Returns 0 on success or a negative errno.
 */
static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

/*
 * Module exit: unregister the crypt target and destroy the io slab
 * cache.  An unregister failure is only logged; cleanup proceeds.
 */
static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");