/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;
	atomic_t pending;
	int error;
	int post_process;
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	int write;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
	           const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *queue;
	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct crypto_cipher *essiv_tfm;
		int benbi_shift;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_blkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
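
/*
 * Worked examples (illustrative only, assuming a cipher with a 16-byte
 * IV, e.g. aes-cbc):
 *
 *   plain: sector 0x12345678 -> iv = 78 56 34 12 00 00 ... 00
 *          (32-bit little-endian sector number, zero padded)
 *
 *   essiv: iv = E(salt, sector), where salt = H(key) and H is the digest
 *          named in the table line, e.g. "essiv:sha256"
 *
 *   benbi: with 16-byte cipher blocks a 512-byte sector spans 32 blocks,
 *          so sector n uses the count n * 32 + 1, stored big-endian in
 *          the last 8 bytes of the IV
 */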

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
	                      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_set_buf(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_blkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			        "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private.essiv_tfm = essiv_tfm;
	return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen */
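	/* Example (assuming AES): block size 16 -> log = 4, so the shift is
	 * 9 - 4 = 5 and each 512-byte sector covers 1 << 5 = 32 blocks. */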

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is filled in below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
	} else {
		if (write)
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
	}

	return r;
}

static void
crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
                   struct bio *bio_out, struct bio *bio_in,
                   sector_t sector, int write)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	ctx->write = write;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
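/*
 * Note: each 1 << SECTOR_SHIFT (512-byte) chunk is converted with its own
 * IV, generated from ctx->sector, which is advanced after every chunk.
 */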
static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in = {
			.page = bv_in->bv_page,
			.offset = bv_in->bv_offset + ctx->offset_in,
			.length = 1 << SECTOR_SHIFT
		};
		struct scatterlist sg_out = {
			.page = bv_out->bv_page,
			.offset = bv_out->bv_offset + ctx->offset_out,
			.length = 1 << SECTOR_SHIFT
		};

		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
		                              ctx->write, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned int i;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		struct bio_vec *bv = bio_iovec_idx(clone, i);

		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!bv->bv_page)
			break;

		/*
		 * If additional pages cannot be allocated without waiting,
		 * return a partially allocated bio; the caller will then try
		 * to allocate additional bios while submitting this partial bio.
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		bv->bv_offset = 0;
		if (size > PAGE_SIZE)
			bv->bv_len = PAGE_SIZE;
		else
			bv->bv_len = size;

		clone->bi_size += bv->bv_len;
		clone->bi_vcnt++;
		size -= bv->bv_len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void dec_pending(struct dm_crypt_io *io, int error)
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (error < 0)
		io->error = error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	bio_endio(io->base_bio, io->error);

	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 */
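/*
 * Flow (as implemented below): crypt_map() only queues the io here; the
 * worker clones and submits reads, re-queues them for decryption once the
 * read completes (post_process), and encrypts writes into newly allocated
 * pages before submitting them.
 */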
static void kcryptd_do_work(struct work_struct *work);

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_do_work);
	queue_work(cc->queue, &io->work);
}

static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned read_io = bio_data_dir(clone) == READ;

	/*
	 * free the processed pages
	 */
	if (!read_io) {
		crypt_free_buffer_pages(cc, clone);
		goto out;
	}

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
		error = -EIO;
		goto out;
	}

	bio_put(clone);
	io->post_process = 1;
	kcryptd_queue_io(io);
	return;

out:
	bio_put(clone);
	dec_pending(io, error);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void process_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		dec_pending(io, -ENOMEM);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

static void process_write(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	struct convert_context ctx;
	unsigned remaining = base_bio->bi_size;
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

	crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
			dec_pending(io, -ENOMEM);
			return;
		}

		ctx.bio_out = clone;
		ctx.idx_out = 0;

		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			dec_pending(io, -EIO);
			return;
		}

		/* crypt_convert should have filled the clone bio */
		BUG_ON(ctx.idx_out < clone->bi_vcnt);

		clone->bi_sector = cc->start + sector;
		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		/* Grab another reference to the io struct
		 * before we kick off the request */
		if (remaining)
			atomic_inc(&io->pending);

		generic_make_request(clone);

		/* Do not reference clone after this - it
		 * may be gone already. */

		/* out of memory -> run queues */
		if (remaining)
			congestion_wait(WRITE, HZ/100);
	}
}

static void process_read_endio(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct convert_context ctx;

	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
			   io->base_bio->bi_sector - io->target->begin, 0);

	dec_pending(io, crypt_convert(cc, &ctx));
}

static void kcryptd_do_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (io->post_process)
		process_read_endio(io);
	else if (bio_data_dir(io->base_bio) == READ)
		process_read(io);
	else
		process_write(io);
}

/*
 * Decode key from its hex representation
 */
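/* e.g. the string "ef01" decodes to the two bytes 0xef, 0x01 */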
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
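/*
 * Example table line as passed to "dmsetup create" (illustrative values;
 * the key is key_size * 2 hex digits):
 *
 *   0 2097152 crypt aes-cbc-essiv:sha256 <hex key> 0 /dev/sdb1 0
 */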
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_blkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad1;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad1;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode, 
		     cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad1;
	}

	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad1;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi", "null".
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad2;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad2;

	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
		                  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad3;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad4;
	}

	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad5;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad5;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad5;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
	                  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad5;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad_iv_mode;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	cc->queue = create_singlethread_workqueue("kcryptd");
	if (!cc->queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad_queue;
	}

	ti->private = cc;
	return 0;

bad_queue:
	kfree(cc->iv_mode);
bad_iv_mode:
	dm_put_device(ti, cc->dev);
bad5:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad4:
	mempool_destroy(cc->io_pool);
bad3:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad2:
	crypto_free_blkcipher(tfm);
bad1:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	destroy_workqueue(cc->queue);

	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_blkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->error = io->post_process = 0;
	atomic_set(&io->pending, 0);
	kcryptd_queue_io(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
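/*
 * e.g. (illustrative): suspend the device, then
 *	dmsetup message <device> 0 key wipe
 *	dmsetup message <device> 0 key set <hex key>
 * and resume it again.
 */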
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 5, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");