dm-crypt.c 27.9 KB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
1
2
3
/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
4
 * Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved.
Linus Torvalds's avatar
Linus Torvalds committed
5
6
7
8
 *
 * This file is released under the GPL.
 */

9
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
Linus Torvalds's avatar
Linus Torvalds committed
25
26
27

#include "dm.h"

28
#define DM_MSG_PREFIX "crypt"
Milan Broz's avatar
Milan Broz committed
29
#define MESG_STR(x) x, sizeof(x)
Linus Torvalds's avatar
Linus Torvalds committed
30
31
32
33
34

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;	/* completion for async restart — NOTE(review): only initialized here in this chunk; confirm waiter */
	struct bio *bio_in;		/* source bio */
	struct bio *bio_out;		/* destination bio (may equal bio_in for reads) */
	unsigned int offset_in;		/* byte offset inside the current input bvec */
	unsigned int offset_out;	/* byte offset inside the current output bvec */
	unsigned int idx_in;		/* index of the current input bvec */
	unsigned int idx_out;		/* index of the current output bvec */
	sector_t sector;		/* sector used for the next IV (already includes iv_offset) */
	atomic_t pending;		/* outstanding crypto ops; see crypt_convert() for the +2 protocol */
};

46
47
48
49
50
51
52
53
54
55
56
57
/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;	/* owning dm target (holds crypt_config in ->private) */
	struct bio *base_bio;		/* original bio submitted to the target */
	struct work_struct work;	/* queued on io_queue or crypt_queue */

	struct convert_context ctx;	/* state of the in-flight en/decryption */

	atomic_t pending;		/* reference count; base_bio completes when it drops to 0 */
	int error;			/* error reported to bio_endio() at completion */
	sector_t sector;		/* next target-relative sector to process */
};

61
62
63
64
65
/* Scatterlists describing one 512-byte sector of input and output data. */
struct dm_crypt_request {
	struct scatterlist sg_in;	/* source: one sector */
	struct scatterlist sg_out;	/* destination: one sector */
};

Linus Torvalds's avatar
Linus Torvalds committed
66
67
68
69
struct crypt_config;

/*
 * One IV generation scheme (plain/essiv/benbi/null).  ctr/dtr manage
 * per-device state; generator produces the IV for a given sector.
 */
struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

struct crypt_config {
	struct dm_dev *dev;		/* underlying block device */
	sector_t start;			/* start sector on the underlying device */

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	/* separate queues so crypto work cannot be starved by new IO */
	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct crypto_cipher *essiv_tfm;	/* essiv: sector-encryption tfm */
		int benbi_shift;			/* benbi: sector -> block-count shift */
	} iv_gen_private;
	sector_t iv_offset;		/* added to the sector before IV generation */
	unsigned int iv_size;		/* IV size of the bulk cipher, in bytes */

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;
	struct ablkcipher_request *req;

	char cipher[CRYPTO_MAX_ALG_NAME];	/* raw cipher name, e.g. "aes" */
	char chainmode[CRYPTO_MAX_ALG_NAME];	/* chaining mode, e.g. "cbc" */
	struct crypto_blkcipher *tfm;		/* bulk en/decryption transform */
	unsigned long flags;			/* DM_CRYPT_* bits above */
	unsigned int key_size;
	u8 key[0];		/* variable-length key, allocated together with the struct */
};

132
#define MIN_IOS        16
Linus Torvalds's avatar
Linus Torvalds committed
133
134
135
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

136
static struct kmem_cache *_crypt_io_pool;
Linus Torvalds's avatar
Linus Torvalds committed
137

Alasdair G Kergon's avatar
Alasdair G Kergon committed
138
static void clone_init(struct dm_crypt_io *, struct bio *);
139
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
Olaf Kirch's avatar
Olaf Kirch committed
140

Linus Torvalds's avatar
Linus Torvalds committed
141
142
143
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

/* plain IV: low 32 bits of the sector number, little-endian, zero padded. */
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	u32 *iv_head = (u32 *)iv;

	memset(iv, 0, cc->iv_size);
	*iv_head = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

/*
 * ESSIV constructor: derive a salt by hashing the volume key with the
 * digest named in @opts, then key a second cipher instance (same cipher
 * as the bulk tfm) with that salt for per-sector IV encryption.
 */
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	/* Salt length equals the digest size of the chosen hash. */
	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	/* The IV fed to the bulk cipher must fit one ESSIV cipher block. */
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_blkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	/* The salt lives on only inside the keyed tfm. */
	kfree(salt);

	cc->iv_gen_private.essiv_tfm = essiv_tfm;
	return 0;
}

/* Release the ESSIV sector-encryption tfm set up by crypt_iv_essiv_ctr(). */
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}

/*
 * ESSIV IV: encrypt the little-endian sector number (zero padded) with
 * the salt-keyed cipher, in place.
 */
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}

254
255
256
257
/*
 * benbi constructor: precompute the shift that converts a 512-byte
 * sector count into a cipher-block count.
 */
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}

/* benbi keeps no allocated state, so there is nothing to tear down. */
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

/*
 * benbi IV: big-endian narrow-block count starting at 1, stored in the
 * last 8 bytes of the IV; the leading bytes are zero.
 */
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

Ludwig Nussel's avatar
Ludwig Nussel committed
294
295
296
297
298
299
300
/* null IV: always all zeroes (loop_fish2 compatibility, see header comment). */
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

Linus Torvalds's avatar
Linus Torvalds committed
301
302
303
304
305
306
307
308
309
310
/* plain needs no setup/teardown; essiv manages its salt-keyed tfm. */
static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

311
312
313
314
315
static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};
Linus Torvalds's avatar
Linus Torvalds committed
316

Ludwig Nussel's avatar
Ludwig Nussel committed
317
318
319
320
static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

321
/*
 * En/decrypt @length bytes from @in into @out with the bulk cipher,
 * using the configured IV generator (if any) seeded with @sector.
 * @write selects encryption (non-zero) vs decryption.
 */
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
	/* IV on the stack, u64-aligned for the cipher implementation. */
	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
	} else {
		/* No IV scheme configured (e.g. ECB): plain en/decrypt. */
		if (write)
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
	}

	return r;
}

Milan Broz's avatar
Milan Broz committed
353
354
355
/*
 * Prepare a convert_context for walking bio_in -> bio_out starting at
 * @sector (target-relative; iv_offset is applied here).
 */
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
	/*
	 * Crypto operation can be asynchronous,
	 * ctx->pending is increased after request submission.
	 * We need to ensure that we don't call the crypt finish
	 * operation before pending got incremented
	 * (dependent on crypt submission return code).
	 */
	atomic_set(&ctx->pending, 2);
}

376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
/*
 * Process exactly one 512-byte sector: build single-entry scatterlists
 * for the current input/output positions, advance the cursors, and hand
 * the sector to crypt_convert_scatterlist().
 */
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx)
{
	struct bio_vec *in_vec = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *out_vec = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request dmreq;
	const unsigned int sector_len = 1 << SECTOR_SHIFT;

	sg_init_table(&dmreq.sg_in, 1);
	sg_set_page(&dmreq.sg_in, in_vec->bv_page, sector_len,
		    in_vec->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq.sg_out, 1);
	sg_set_page(&dmreq.sg_out, out_vec->bv_page, sector_len,
		    out_vec->bv_offset + ctx->offset_out);

	/* Advance each side; move to the next bvec once this one is used up. */
	ctx->offset_in += sector_len;
	if (ctx->offset_in >= in_vec->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += sector_len;
	if (ctx->offset_out >= out_vec->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	return crypt_convert_scatterlist(cc, &dmreq.sg_out, &dmreq.sg_in,
					 dmreq.sg_in.length,
					 bio_data_dir(ctx->bio_in) == WRITE,
					 ctx->sector);
}

409
410
411
412
413
414
415
/*
 * Lazily allocate the shared crypto request from the mempool; reused
 * until freed elsewhere.  @ctx is currently unused here.
 */
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (!cc->req)
		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
}

Linus Torvalds's avatar
Linus Torvalds committed
416
417
418
419
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 *
 * Returns -EINPROGRESS while async crypto is outstanding, otherwise the
 * last conversion result (0 on success, negative on error).
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r = 0;

	/* Walk both bios one sector at a time until either is exhausted. */
	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		r = crypt_convert_block(cc, ctx);
		if (r < 0)
			break;

		ctx->sector++;
	}

	/*
	 * If there are pending crypto operation run async
	 * code. Otherwise process return code synchronously.
	 * The step of 2 ensures that async finish doesn't
	 * call crypto finish too early.
	 */
	if (atomic_sub_return(2, &ctx->pending))
		return -EINPROGRESS;

	return r;
}

Milan Broz's avatar
Milan Broz committed
445
446
/* bio destructor: return clone bios to this device's private bio_set. */
static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}
452

Linus Torvalds's avatar
Linus Torvalds committed
453
454
455
456
457
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		/* Last page may be partial. */
		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	/* Nothing could be added at all: give up. */
	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

Neil Brown's avatar
Neil Brown committed
504
/* Return every page of a crypt_alloc_buffer() bio to the page mempool. */
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;	/* guard against double free */
	}
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	if (!atomic_dec_and_test(&io->pending))
		return;

	/* Last reference: complete the original bio and recycle the io. */
	bio_endio(io->base_bio, io->error);
	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
546
/*
 * Completion handler for clone bios.  Writes are finished here; reads
 * are handed to the crypt workqueue for decryption.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	/* Some drivers only clear BIO_UPTODATE without setting an error. */
	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	/* Successful read: data still needs decrypting on the workqueue. */
	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

Alasdair G Kergon's avatar
Alasdair G Kergon committed
574
/* Wire a clone bio to our device, completion handler and destructor. */
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

585
/*
 * Submit a read clone of base_bio to the underlying device; decryption
 * happens later in crypt_endio() -> kcryptd_queue_crypt().
 */
static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	atomic_inc(&io->pending);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		io->error = -ENOMEM;
		crypt_dec_pending(io);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

616
617
618
619
/*
 * Write submission stub: write clones are currently submitted directly
 * from kcryptd_crypt_write_io_submit(), so nothing happens here.
 */
static void kcryptd_io_write(struct dm_crypt_io *io)
{
}

620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
/* io_queue work handler: dispatch to the read or write submission path. */
static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
	const int direction = bio_data_dir(io->base_bio);

	if (direction != READ)
		kcryptd_io_write(io);
	else
		kcryptd_io_read(io);
}

/* Queue IO submission work on the dedicated io workqueue. */
static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

638
639
/*
 * Submit an encrypted write clone, or clean it up if the conversion
 * (@error) failed.
 */
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;
	io->sector += bio_sectors(clone);	/* advance for the next partial clone */

	atomic_inc(&io->pending);
	generic_make_request(clone);
}

660
/*
 * Encrypt base_bio into freshly allocated clone bios and submit them,
 * possibly in several partial chunks when page allocation falls short.
 */
static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned remaining = io->base_bio->bi_size;
	int r;

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			return;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;

		r = crypt_convert(cc, &io->ctx);

		kcryptd_crypt_write_io_submit(io, r);
		if (unlikely(r < 0))
			return;

		/* out of memory -> run queues */
		if (unlikely(remaining))
			congestion_wait(WRITE, HZ/100);
	}
}

695
696
697
698
/* Entry point for the write path on the crypt workqueue. */
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	atomic_inc(&io->pending);

	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
	kcryptd_crypt_write_convert_loop(io);

	crypt_dec_pending(io);
}

710
/* Record a decryption failure (if any) and drop the io reference. */
static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

718
/* Decrypt read data in place (base_bio is both source and destination). */
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	kcryptd_crypt_read_done(io, r);
}

731
/* crypt_queue work handler: dispatch to the read or write conversion. */
static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

741
/* Queue en/decryption work on the dedicated crypt workqueue. */
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 *
 * @key:  destination buffer for @size decoded bytes
 * @hex:  NUL-terminated string of exactly 2 * @size hex digits
 *
 * Returns 0 on success, -EINVAL on any malformed input.
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		/*
		 * Require two literal hex digits per byte.  Checking each
		 * character before consuming the next also stops us at an
		 * embedded NUL, and rejects a "0x"/"0X" pair, which
		 * simple_strtoul() with base 16 would otherwise silently
		 * swallow and decode to 0x00.
		 */
		buffer[0] = *hex++;
		if (!isxdigit(buffer[0]))
			return -EINVAL;

		buffer[1] = *hex++;
		if (!isxdigit(buffer[1]))
			return -EINVAL;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	/* The string must end exactly where the key does. */
	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 *
 * Writes 2 * @size lowercase hex characters (plus a trailing NUL from
 * the final sprintf) into @hex.
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++, hex += 2)
		sprintf(hex, "%02x", key[i]);
}

Milan Broz's avatar
Milan Broz committed
790
791
792
793
794
795
796
797
798
799
/*
 * Parse and install the key from its hex string form ("-" means an
 * empty key).  Marks the key valid on success.
 */
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	/* A key of a different size than configured is rejected. */
	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	   (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}

/* Invalidate and zeroize the key material held in the config. */
static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(cc->key, 0, cc->key_size);
	return 0;
}

Linus Torvalds's avatar
Linus Torvalds committed
815
816
817
818
819
820
821
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 *
 * argv[0] is "cipher[-chainmode[-ivmode[:ivopts]]]", argv[1] the hex key
 * (or "-"), argv[2] the IV offset in sectors, argv[3] the backing device
 * path and argv[4] the start sector on that device.  On failure every
 * resource acquired so far is released through the goto-unwind chain at
 * the bottom; the labels must stay in reverse order of acquisition.
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_blkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	/* split "cipher-chainmode-ivmode:ivopts" in place */
	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	/* two hex digits per key byte */
	key_size = strlen(argv[1]) >> 1;

	/* key material is a flexible array at the end of crypt_config */
	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad_cipher;
	}

	/* Compatiblity mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad_cipher;
	}

	/* build the crypto API algorithm name, e.g. "cbc(aes)" */
	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad_cipher;
	}

	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad_cipher;
	}

	/* keep the bare names for crypt_status() output */
	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad_ivmode;
	}

	/* let the IV generator parse its options, e.g. the essiv hash */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad_ivmode;

	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad_slab_pool;
	}

	/* crypto request is followed by the dm_crypt_request and the IV */
	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad_req_pool;
	}
	cc->req = NULL;

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad_page_pool;
	}

	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad_device;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad_device;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad_device;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad_device;
	}

	if (ivmode && cc->iv_gen_ops) {
		/* re-join "ivmode:ivopts" that strsep() split above */
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad_ivmode_string;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad_io_queue;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad_crypt_queue;
	}

	ti->private = cc;
	return 0;

	/* unwind in reverse order of acquisition */
bad_crypt_queue:
	destroy_workqueue(cc->io_queue);
bad_io_queue:
	kfree(cc->iv_mode);
bad_ivmode_string:
	dm_put_device(ti, cc->dev);
bad_device:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad_page_pool:
	mempool_destroy(cc->req_pool);
bad_req_pool:
	mempool_destroy(cc->io_pool);
bad_slab_pool:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad_ivmode:
	crypto_free_blkcipher(tfm);
bad_cipher:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}

/*
 * Destroy an encryption mapping: release everything crypt_ctr() set up,
 * in reverse order of construction.  The workqueues are flushed first so
 * no work item can touch the pools/tfm after they are freed.
 */
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	destroy_workqueue(cc->io_queue);
	destroy_workqueue(cc->crypt_queue);

	/* return a cached, never-submitted request to its pool */
	if (cc->req)
		mempool_free(cc->req, cc->req_pool);

	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->req_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_blkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}

/*
 * Map a bio onto the crypt target: allocate per-bio state and hand the
 * bio to the appropriate workqueue.  READs go to the io queue (read the
 * ciphertext first, decrypt on completion); WRITEs go straight to the
 * crypt queue (encrypt before submitting).  Always defers the bio.
 */
static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	/* GFP_NOIO: may not recurse into the block layer to reclaim */
	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	/* sector relative to the start of this target */
	io->sector = bio->bi_sector - ti->begin;
	io->error = 0;
	atomic_set(&io->pending, 0);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

/*
 * Report target status.  STATUSTYPE_INFO has nothing to say;
 * STATUSTYPE_TABLE reconstructs the constructor line:
 * "<cipher>-<chainmode>[-<ivmode>] <hexkey|-> <iv_offset> <dev> <start>".
 * Returns -ENOMEM if the key does not fit into the result buffer.
 * Note: DMEMIT appends to result and advances sz as a side effect.
 */
static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			/* two hex chars per key byte plus the NUL */
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			/* "-" stands for an empty key */
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

Milan Broz's avatar
Milan Broz committed
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
/* Mark the target suspended so key-change messages are allowed. */
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/*
 * Refuse to resume until a valid key has been set (e.g. after
 * "key wipe"); returns -EAGAIN so the resume can be retried.
 */
static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

/* Clear the suspended flag; key messages are rejected again. */
static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 *
 * Key manipulation is only permitted while the target is suspended,
 * so no I/O can be in flight with a half-changed key.  Any message
 * that does not match the forms above yields -EINVAL.
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}