dm-crypt.c 25.9 KB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
1
2
3
/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
4
 * Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved.
Linus Torvalds's avatar
Linus Torvalds committed
5
6
7
8
 *
 * This file is released under the GPL.
 */

9
#include <linux/err.h>
Linus Torvalds's avatar
Linus Torvalds committed
10
11
12
13
14
15
16
17
18
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
19
#include <linux/backing-dev.h>
Linus Torvalds's avatar
Linus Torvalds committed
20
#include <asm/atomic.h>
21
#include <linux/scatterlist.h>
Linus Torvalds's avatar
Linus Torvalds committed
22
#include <asm/page.h>
23
#include <asm/unaligned.h>
Linus Torvalds's avatar
Linus Torvalds committed
24
25
26

#include "dm.h"

27
#define DM_MSG_PREFIX "crypt"
Milan Broz's avatar
Milan Broz committed
28
#define MESG_STR(x) x, sizeof(x)
Linus Torvalds's avatar
Linus Torvalds committed
29
30
31
32
33
34
35
36
37
38
39
40
41
42

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;		/* source of plaintext/ciphertext */
	struct bio *bio_out;		/* destination (may be bio_in for in-place reads) */
	unsigned int offset_in;		/* byte offset within the current input bvec */
	unsigned int offset_out;	/* byte offset within the current output bvec */
	unsigned int idx_in;		/* index into bio_in's bvec array */
	unsigned int idx_out;		/* index into bio_out's bvec array */
	sector_t sector;		/* next sector to convert; already includes iv_offset */
};

43
44
45
46
47
48
49
50
51
52
53
54
55
56
/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;	/* owning target; ->private is the crypt_config */
	struct bio *base_bio;		/* original bio handed to crypt_map() */
	struct work_struct work;	/* reused for both io_queue and crypt_queue */

	struct convert_context ctx;	/* state of the in-progress conversion */

	atomic_t pending;		/* in-flight references; last drop completes base_bio */
	int error;			/* error to report via bio_endio() */
};

Linus Torvalds's avatar
Linus Torvalds committed
57
58
59
60
struct crypt_config;

/*
 * Pluggable IV generation strategy.  Only generator is mandatory;
 * ctr/dtr exist for modes that keep per-device state (e.g. essiv).
 */
struct crypt_iv_operations {
	/* parse opts and allocate per-mode state at table-construct time */
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	/* release whatever ctr allocated */
	void (*dtr)(struct crypt_config *cc);
	/* report mode parameters; none of the modes in this file set it */
	const char *(*status)(struct crypt_config *cc);
	/* fill iv (cc->iv_size bytes) for the given sector */
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

struct crypt_config {
	struct dm_dev *dev;		/* underlying block device */
	sector_t start;			/* first sector used on dev */

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;
	struct bio_set *bs;		/* bioset for clone bios */

	/* separate queues so crypto work cannot starve IO submission */
	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;			/* IV mode string, kept for status output */
	union {
		struct crypto_cipher *essiv_tfm;	/* essiv mode only */
		int benbi_shift;			/* benbi mode only */
	} iv_gen_private;
	sector_t iv_offset;		/* user-supplied offset added before IV generation */
	unsigned int iv_size;		/* bytes handed to the generator */

	char cipher[CRYPTO_MAX_ALG_NAME];	/* bare cipher name, e.g. "aes" */
	char chainmode[CRYPTO_MAX_ALG_NAME];	/* e.g. "cbc" */
	struct crypto_blkcipher *tfm;		/* bulk data cipher */
	unsigned long flags;			/* DM_CRYPT_* bits */
	unsigned int key_size;
	u8 key[0];			/* key material, allocated inline after the struct */
};

106
#define MIN_IOS        16	/* reserved dm_crypt_io objects / bios in pools */
#define MIN_POOL_PAGES 32	/* pages kept in reserve in page_pool */
#define MIN_BIO_PAGES  8	/* pages allocated with waiting before going nowait */

/* slab cache backing io_pool allocations of struct dm_crypt_io */
static struct kmem_cache *_crypt_io_pool;
Linus Torvalds's avatar
Linus Torvalds committed
111

Alasdair G Kergon's avatar
Alasdair G Kergon committed
112
static void clone_init(struct dm_crypt_io *, struct bio *);
Olaf Kirch's avatar
Olaf Kirch committed
113

Linus Torvalds's avatar
Linus Torvalds committed
114
115
116
/*
 * Different IV generation algorithms:
 *
117
 * plain: the initial vector is the 32-bit little-endian version of the sector
118
 *        number, padded with zeros if necessary.
Linus Torvalds's avatar
Linus Torvalds committed
119
 *
120
121
122
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
Linus Torvalds's avatar
Linus Torvalds committed
123
 *
124
125
126
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
Ludwig Nussel's avatar
Ludwig Nussel committed
127
128
129
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
Linus Torvalds's avatar
Linus Torvalds committed
130
131
132
133
134
135
136
137
138
139
140
141
142
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	/*
	 * Plain IV: low 32 bits of the sector number, little-endian,
	 * zero-padded out to cc->iv_size.
	 */
	u32 low_bits = (u32)(sector & 0xffffffff);

	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(low_bits);

	return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
Milan Broz's avatar
Milan Broz committed
143
			      const char *opts)
Linus Torvalds's avatar
Linus Torvalds committed
144
{
145
	struct crypto_cipher *essiv_tfm;
146
147
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
Linus Torvalds's avatar
Linus Torvalds committed
148
149
150
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
151
	int err;
Linus Torvalds's avatar
Linus Torvalds committed
152
153

	if (opts == NULL) {
154
		ti->error = "Digest algorithm missing for ESSIV mode";
Linus Torvalds's avatar
Linus Torvalds committed
155
156
157
158
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
159
160
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
161
		ti->error = "Error initializing ESSIV hash";
162
		return PTR_ERR(hash_tfm);
Linus Torvalds's avatar
Linus Torvalds committed
163
164
	}

165
	saltsize = crypto_hash_digestsize(hash_tfm);
Linus Torvalds's avatar
Linus Torvalds committed
166
167
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
168
		ti->error = "Error kmallocing salt storage in ESSIV";
169
		crypto_free_hash(hash_tfm);
Linus Torvalds's avatar
Linus Torvalds committed
170
171
172
		return -ENOMEM;
	}

173
	sg_init_one(&sg, cc->key, cc->key_size);
174
175
176
177
178
179
180
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
181
		kfree(salt);
182
183
		return err;
	}
Linus Torvalds's avatar
Linus Torvalds committed
184
185

	/* Setup the essiv_tfm with the given salt */
186
187
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
188
		ti->error = "Error allocating crypto tfm for ESSIV";
Linus Torvalds's avatar
Linus Torvalds committed
189
		kfree(salt);
190
		return PTR_ERR(essiv_tfm);
Linus Torvalds's avatar
Linus Torvalds committed
191
	}
192
193
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_blkcipher_ivsize(cc->tfm)) {
194
		ti->error = "Block size of ESSIV cipher does "
Milan Broz's avatar
Milan Broz committed
195
			    "not match IV size of block cipher";
196
		crypto_free_cipher(essiv_tfm);
Linus Torvalds's avatar
Linus Torvalds committed
197
198
199
		kfree(salt);
		return -EINVAL;
	}
200
201
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
202
		ti->error = "Failed to set key for ESSIV cipher";
203
		crypto_free_cipher(essiv_tfm);
Linus Torvalds's avatar
Linus Torvalds committed
204
		kfree(salt);
205
		return err;
Linus Torvalds's avatar
Linus Torvalds committed
206
207
208
	}
	kfree(salt);

209
	cc->iv_gen_private.essiv_tfm = essiv_tfm;
Linus Torvalds's avatar
Linus Torvalds committed
210
211
212
213
214
	return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	/* Release the ESSIV sector-encryption tfm and mark it gone. */
	struct crypto_cipher *essiv_tfm = cc->iv_gen_private.essiv_tfm;

	cc->iv_gen_private.essiv_tfm = NULL;
	crypto_free_cipher(essiv_tfm);
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	/* ESSIV: encrypt the little-endian sector number with the salt-keyed tfm */
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);

	return 0;
}

227
228
229
230
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	/* 512-byte sectors -> cipher blocks: count = sector << (9 - log) */
	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}

/* benbi keeps no allocated state, so there is nothing to tear down */
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	/* big-endian narrow-block count (starting at 1) in the IV's last 8 bytes */
	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

Ludwig Nussel's avatar
Ludwig Nussel committed
267
268
269
270
271
272
273
/* Null IV: always zero; compatibility with obsolete loop_fish2 devices */
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

Linus Torvalds's avatar
Linus Torvalds committed
274
275
276
277
278
279
280
281
282
283
/* Dispatch tables for the IV modes selectable on the table line */
static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

294
/*
 * Encrypt or decrypt one scatterlist segment with the bulk cipher,
 * generating the per-sector IV first when an IV mode is configured.
 * write != 0 selects encryption.  Returns 0 or a negative error from
 * the IV generator / crypto layer.
 */
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
	/* runtime-sized IV on the stack, u64-aligned for the generators */
	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
	} else {
		if (write)
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
	}

	return r;
}

Milan Broz's avatar
Milan Broz committed
326
327
328
/*
 * Prepare ctx for a (possibly multi-part) conversion over bio_in/bio_out.
 * bio_out may be NULL here: the write path fills it in per allocated
 * buffer clone before calling crypt_convert().
 */
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	/* apply the user-supplied IV offset once, up front */
	ctx->sector = sector + cc->iv_offset;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r = 0;

	/* walk both bvec arrays one 512-byte sector at a time */
	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in, sg_out;

		sg_init_table(&sg_in, 1);
		sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, bv_in->bv_offset + ctx->offset_in);

		sg_init_table(&sg_out, 1);
		sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, bv_out->bv_offset + ctx->offset_out);

		/*
		 * Advance the cursors before converting; sg_in/sg_out keep
		 * pointing at the segment just set up.  NOTE(review): this
		 * appears to assume bv_len is a multiple of the sector size.
		 */
		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
			bio_data_dir(ctx->bio_in) == WRITE, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}

Milan Broz's avatar
Milan Broz committed
383
384
/* Return clone bios to this device's bioset rather than the global pool */
static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}
390

Linus Torvalds's avatar
Linus Torvalds committed
391
392
393
394
395
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		/* last page of an unaligned size gets the remainder */
		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	/* could not add even a single page: report allocation failure */
	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

Neil Brown's avatar
Neil Brown committed
442
/* Give every page attached by crypt_alloc_buffer() back to the mempool */
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	if (!atomic_dec_and_test(&io->pending))
		return;

	/* last reference dropped: complete the original bio, recycle io */
	bio_endio(io->base_bio, io->error);
	mempool_free(io, cc->io_pool);
}

/*
471
 * kcryptd/kcryptd_io:
Linus Torvalds's avatar
Linus Torvalds committed
472
473
 *
 * Needed because it would be very unwise to do decryption in an
474
 * interrupt context.
475
476
477
478
479
480
481
482
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
Linus Torvalds's avatar
Linus Torvalds committed
483
 */
484
485
static void kcryptd_io(struct work_struct *work);
static void kcryptd_crypt(struct work_struct *work);
Linus Torvalds's avatar
Linus Torvalds committed
486

Alasdair G Kergon's avatar
Alasdair G Kergon committed
487
/* Defer io to the IO-submission workqueue */
static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

/* Defer io to the encryption/decryption workqueue */
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

503
/*
 * Completion handler for clone bios.  Successful reads still carry
 * ciphertext, so they are bounced to the crypt workqueue for
 * decryption instead of being completed here.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	/* normalize "not uptodate but no error code" to -EIO */
	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

Alasdair G Kergon's avatar
Alasdair G Kergon committed
531
/* Point a freshly allocated clone at our device and completion hooks */
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

542
/*
 * Read path, step 1: clone base_bio's bvec table and submit the clone
 * to the underlying device.  Decryption happens later, from crypt_endio
 * via the crypt workqueue.
 */
static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		io->error = -ENOMEM;
		crypt_dec_pending(io);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

574
575
576
577
578
579
580
581
582
/*
 * Placeholder: write IO submission currently happens inside
 * kcryptd_crypt_write_convert() (which calls generic_make_request
 * directly), so this hook is intentionally empty.
 */
static void kcryptd_io_write(struct dm_crypt_io *io)
{
}

/* Placeholder for post-conversion write submission handling. */
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error)
{
}

/*
 * Write path: encrypt base_bio chunk by chunk into freshly allocated
 * buffer bios and submit each one.  io->pending counts the in-flight
 * clones plus the reference taken here.
 */
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	unsigned remaining = base_bio->bi_size;
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

	crypt_convert_init(cc, &io->ctx, NULL, base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			crypt_dec_pending(io);
			return;
		}

		/* continue the conversion into the new output clone */
		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		if (unlikely(crypt_convert(cc, &io->ctx) < 0)) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			io->error = -EIO;
			crypt_dec_pending(io);
			return;
		}

		/* crypt_convert should have filled the clone bio */
		BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

		clone->bi_sector = cc->start + sector;
		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		/* Grab another reference to the io struct
		 * before we kick off the request */
		if (remaining)
			atomic_inc(&io->pending);

		generic_make_request(clone);

		/* Do not reference clone after this - it
		 * may be gone already. */

		/* out of memory -> run queues */
		if (remaining)
			congestion_wait(WRITE, HZ/100);
	}
}

640
/* Read conversion finished: flatten crypto errors to -EIO, drop our ref */
static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

648
/* Read path, step 2: decrypt the just-read data in place within base_bio */
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	/* in-place: base_bio is both input and output */
	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->base_bio->bi_sector - io->target->begin);

	r = crypt_convert(cc, &io->ctx);

	kcryptd_crypt_read_done(io, r);
}

661
static void kcryptd_io(struct work_struct *work)
Linus Torvalds's avatar
Linus Torvalds committed
662
{
Alasdair G Kergon's avatar
Alasdair G Kergon committed
663
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
664

665
	if (bio_data_dir(io->base_bio) == READ)
666
667
668
		kcryptd_io_read(io);
	else
		kcryptd_io_write(io);
669
670
}

671
static void kcryptd_crypt(struct work_struct *work)
672
673
674
675
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
676
		kcryptd_crypt_read_convert(io);
677
	else
678
		kcryptd_crypt_write_convert(io);
Linus Torvalds's avatar
Linus Torvalds committed
679
680
681
682
683
684
685
686
687
688
689
690
691
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	/* consume exactly two hex digits per output byte */
	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		/* endp stopping short of buffer[2] means a non-hex character */
		if (endp != &buffer[2])
			return -EINVAL;
	}

	/* reject trailing characters beyond 2*size digits */
	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	/* two lowercase hex digits per key byte; sprintf returns 2 each time */
	for (i = 0; i < size; i++)
		hex += sprintf(hex, "%02x", key[i]);
}

Milan Broz's avatar
Milan Broz committed
722
723
724
725
726
727
728
729
730
731
/*
 * Decode the hex key argument into cc->key and mark the key valid.
 * "-" denotes an empty (zero-length) key.  Rejects a length change
 * once cc->key_size has been established.
 */
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	/* hex string: two characters per key byte */
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	   (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}

/* Zero the key material and mark the key invalid */
static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}

Linus Torvalds's avatar
Linus Torvalds committed
747
748
749
750
751
752
753
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_blkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	/* argv[0] is "cipher[-chainmode[-ivmode[:ivopts]]]" */
	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

 	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

 	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad_cipher;
	}

	/* Compatiblity mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad_cipher;
	}

	/* cc->cipher briefly holds "chainmode(cipher)" for the tfm lookup */
	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad_cipher;
	}

	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad_cipher;
	}

	/* now store the bare names; crypt_iv_essiv_ctr() reads cc->cipher */
	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad_ivmode;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad_ivmode;

	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad_slab_pool;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad_page_pool;
	}

	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad_device;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad_device;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad_device;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad_device;
	}

	if (ivmode && cc->iv_gen_ops) {
		/* undo the strsep() split so iv_mode prints as "mode:opts" */
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad_ivmode_string;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	/* separate queues: IO submission must not wait behind crypto work */
	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad_io_queue;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad_crypt_queue;
	}

	ti->private = cc;
	return 0;

bad_crypt_queue:
	destroy_workqueue(cc->io_queue);
bad_io_queue:
	kfree(cc->iv_mode);
bad_ivmode_string:
	dm_put_device(ti, cc->dev);
bad_device:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad_page_pool:
	mempool_destroy(cc->io_pool);
bad_slab_pool:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad_ivmode:
	crypto_free_blkcipher(tfm);
bad_cipher:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

952
953
	destroy_workqueue(cc->io_queue);
	destroy_workqueue(cc->crypt_queue);
Milan Broz's avatar
Milan Broz committed
954

955
	bioset_free(cc->bs);
Linus Torvalds's avatar
Linus Torvalds committed
956
957
958
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

959
	kfree(cc->iv_mode);
Linus Torvalds's avatar
Linus Torvalds committed
960
961
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
962
	crypto_free_blkcipher(cc->tfm);
Linus Torvalds's avatar
Linus Torvalds committed
963
	dm_put_device(ti, cc->dev);
964
965
966

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
Linus Torvalds's avatar
Linus Torvalds committed
967
968
969
970
971
972
	kfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
973
	struct crypt_config *cc = ti->private;
Alasdair G Kergon's avatar
Alasdair G Kergon committed
974
	struct dm_crypt_io *io;
Linus Torvalds's avatar
Linus Torvalds committed
975

Milan Broz's avatar
Milan Broz committed
976
	io = mempool_alloc(cc->io_pool, GFP_NOIO);
Linus Torvalds's avatar
Linus Torvalds committed
977
	io->target = ti;
978
	io->base_bio = bio;
979
	io->error = 0;
980
	atomic_set(&io->pending, 0);
981
982
983
984
985

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);
Linus Torvalds's avatar
Linus Torvalds committed
986

987
	return DM_MAPIO_SUBMITTED;
Linus Torvalds's avatar
Linus Torvalds committed
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
}

/*
 * Report target status.
 *
 * STATUSTYPE_INFO yields an empty string; STATUSTYPE_TABLE emits the
 * constructor-compatible line "cipher-chainmode[-ivmode] key iv_offset
 * device start".  The key is hex-encoded (two chars per byte); a lone
 * '-' stands in for an empty key.  Returns -ENOMEM if the result
 * buffer is too small for the encoded key.
 */
static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			/* hex encoding doubles the size, +1 for NUL */
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

Milan Broz's avatar
Milan Broz committed
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
/*
 * Post-suspend hook: mark the target suspended.  crypt_message() only
 * permits key set/wipe while this flag is set.
 */
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/*
 * Pre-resume hook: refuse to resume until a valid key has been set
 * (it may have been wiped via the message interface while suspended).
 */
static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (test_bit(DM_CRYPT_KEY_VALID, &cc->flags))
		return 0;

	DMERR("aborting resume - crypt key is not set.");
	return -EAGAIN;
}

/*
 * Resume hook: clear the suspended flag set by crypt_postsuspend().
 */
static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/*
 * Message interface
 *	key set <key>
 *	key wipe
 *
 * Key manipulation is only allowed while the target is suspended, so
 * no in-flight I/O can observe a half-changed key.
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc >= 2 && !strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

	DMWARN("unrecognised message received.");
	return -EINVAL;
}

Linus Torvalds's avatar
Linus Torvalds committed
1080
1081
static struct target_type crypt_target = {
	.name   = "crypt",
Ludwig Nussel's avatar
Ludwig Nussel committed
1082
	.version= {1, 5, 0},
Linus Torvalds's avatar
Linus Torvalds committed
1083
1084
1085
1086
1087
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
Milan Broz's avatar
Milan Broz committed
1088
1089
1090
1091
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
Linus Torvalds's avatar
Linus Torvalds committed
1092
1093
1094
1095
1096
1097
};

/*
 * Module init: create the slab cache for per-bio dm_crypt_io objects
 * and register the target.  The cache is torn down again on
 * registration failure so init leaves no state behind.
 */
static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

/*
 * Module exit: unregister the target and free the io slab cache.
 * Unregistration failure is only logged; there is nothing else to do
 * at module unload time.
 */
static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	kmem_cache_destroy(_crypt_io_pool);
}

/* Module entry points and metadata. */
module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");