/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;
	atomic_t pending;
	int error;
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	int write;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;
	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct crypto_cipher *essiv_tfm;
		int benbi_shift;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_blkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
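/*
 * Illustrative example (not part of the original comments): with a 16-byte
 * IV and sector number 5, the "plain" generator below yields
 * 05 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - the little-endian
 * sector number in the first 32 bits, the remainder zeroed.
 */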

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_blkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private.essiv_tfm = essiv_tfm;
	return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
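	/*
	 * Worked example (illustrative, not from the original source): for a
	 * cipher with 16-byte blocks, log = 4 and benbi_shift = 5, so sector 3
	 * maps to block count (3 << 5) + 1 = 97, stored big-endian in the last
	 * 8 bytes of the IV; the leading bytes were zeroed above.
	 */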

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
	} else {
		if (write)
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
	}

	return r;
}

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector, int write)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	ctx->write = write;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in, sg_out;

		sg_init_table(&sg_in, 1);
		sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, bv_in->bv_offset + ctx->offset_in);

		sg_init_table(&sg_out, 1);
		sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, bv_out->bv_offset + ctx->offset_out);

		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
					      ctx->write, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io, int error)
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (error < 0)
		io->error = error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	bio_endio(io->base_bio, io->error);

	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
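
/*
 * Summary of the resulting flow (derived from the code below, added for
 * clarity): READs go crypt_map -> kcryptd_queue_io -> process_read ->
 * crypt_endio -> kcryptd_queue_crypt -> process_read_endio; WRITEs go
 * crypt_map -> kcryptd_queue_crypt -> process_write.
 */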
static void kcryptd_do_work(struct work_struct *work);
static void kcryptd_do_crypt(struct work_struct *work);

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_do_work);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_do_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned read_io = bio_data_dir(clone) == READ;

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (!read_io) {
		crypt_free_buffer_pages(cc, clone);
		goto out;
	}

	if (unlikely(error))
		goto out;

	bio_put(clone);
	kcryptd_queue_crypt(io);
	return;

out:
	bio_put(clone);
	crypt_dec_pending(io, error);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void process_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		crypt_dec_pending(io, -ENOMEM);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

static void process_write(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	struct convert_context ctx;
	unsigned remaining = base_bio->bi_size;
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

	crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
			crypt_dec_pending(io, -ENOMEM);
			return;
		}

		ctx.bio_out = clone;
		ctx.idx_out = 0;

		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			crypt_dec_pending(io, -EIO);
			return;
		}

		/* crypt_convert should have filled the clone bio */
		BUG_ON(ctx.idx_out < clone->bi_vcnt);

		clone->bi_sector = cc->start + sector;
		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		/* Grab another reference to the io struct
		 * before we kick off the request */
		if (remaining)
			atomic_inc(&io->pending);

		generic_make_request(clone);

		/* Do not reference clone after this - it
		 * may be gone already. */

		/* out of memory -> run queues */
		if (remaining)
			congestion_wait(WRITE, HZ/100);
	}
}

static void process_read_endio(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct convert_context ctx;

	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
			   io->base_bio->bi_sector - io->target->begin, 0);

	crypt_dec_pending(io, crypt_convert(cc, &ctx));
}

static void kcryptd_do_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		process_read(io);
}

static void kcryptd_do_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		process_read_endio(io);
	else
		process_write(io);
}

/*
 * Decode key from its hex representation
 */
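/* For example, the string "0a1b" decodes to the two bytes 0x0a and 0x1b. */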
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	   (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
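/*
 * Illustrative only (not from the original source): a typical dmsetup
 * table line using this target looks like
 *
 *   0 409600 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sdb1 0
 *
 * where the trailing five fields are the <cipher> <key> <iv_offset>
 * <dev_path> <start> arguments parsed below.
 */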
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_blkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad_cipher;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad_cipher;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad_cipher;
	}

	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad_cipher;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad_ivmode;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad_ivmode;

	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad_slab_pool;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad_page_pool;
	}

	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad_device;
Linus Torvalds's avatar
Linus Torvalds committed
859
860
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad_device;
Linus Torvalds's avatar
Linus Torvalds committed
864
	}
865
	cc->iv_offset = tmpll;
Linus Torvalds's avatar
Linus Torvalds committed
866

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad_device;
Linus Torvalds's avatar
Linus Torvalds committed
870
	}
871
	cc->start = tmpll;
Linus Torvalds's avatar
Linus Torvalds committed
872
873

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad_device;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad_ivmode_string;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad_io_queue;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad_crypt_queue;
	}

	ti->private = cc;
	return 0;

bad_crypt_queue:
	destroy_workqueue(cc->io_queue);
bad_io_queue:
	kfree(cc->iv_mode);
bad_ivmode_string:
	dm_put_device(ti, cc->dev);
bad_device:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad_page_pool:
	mempool_destroy(cc->io_pool);
bad_slab_pool:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad_ivmode:
	crypto_free_blkcipher(tfm);
bad_cipher:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	destroy_workqueue(cc->io_queue);
	destroy_workqueue(cc->crypt_queue);

	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_blkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->error = 0;
	atomic_set(&io->pending, 0);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
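/*
 * For example (illustrative): "dmsetup message <device> 0 key wipe"
 * reaches this handler with argc == 2, argv == { "key", "wipe" }.
 */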
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version= {1, 5, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");