/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

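/*
 * Return true if any bitmap word still has a clear (free) bit.
 */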
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}

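/*
 * Advance a wait queue index, wrapping at BT_WAIT_QUEUES (which must be a
 * power of two for the mask to work).
 */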
static inline int bt_index_inc(int index)
{
	return (index + 1) & (BT_WAIT_QUEUES - 1);
}

static inline void bt_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = bt_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up anyone potentially sleeping on normal (non-reserved) tags
 */
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		wake_index = bt_index_inc(wake_index);
	}
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant: a depth of 1 can't be split further
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Give each queue a fair share of the depth, rounded up, but never
	 * fewer than 4 tags
	 */
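	/* e.g. bt->depth = 128 with 3 active queues: (128 + 2) / 3 = 43 tags each */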
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

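/*
 * Find and claim a free bit within a single bitmap word, starting the search
 * at 'last_tag' and wrapping around to the start of the word once if needed.
 */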
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
	int tag, org_last_tag, end;

	org_last_tag = last_tag;
	end = bm->depth;
	do {
restart:
		tag = find_next_zero_bit(&bm->word, end, last_tag);
		if (unlikely(tag >= end)) {
			/*
			 * We started the search at an offset; restart from
			 * 0 to exhaust the rest of the word.
			 */
			if (org_last_tag && last_tag) {
				end = last_tag;
				last_tag = 0;
				goto restart;
			}
			return -1;
		}
		last_tag = tag + 1;
	} while (test_and_set_bit_lock(tag, &bm->word));

	return tag;
}

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		last_tag = 0;
		if (++index >= bt->map_nr)
			index = 0;
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path if we ended up
	 * using the specific cached tag.
	 */
done:
	if (tag == org_last_tag) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}

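/*
 * Round-robin across the wait queues so sleepers spread over several
 * cachelines. Callers without a hctx (reserved tags) always use queue 0.
 */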
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;
	int wait_index;

	if (!hctx)
		return &bt->bs[0];

	wait_index = atomic_read(&hctx->wait_index);
	bs = &bt->bs[wait_index];
	bt_index_atomic_inc(&hctx->wait_index);
	return bs;
}

static int bt_get(struct blk_mq_alloc_data *data,
		struct blk_mq_bitmap_tags *bt,
		struct blk_mq_hw_ctx *hctx,
		unsigned int *last_tag)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag);
	if (tag != -1)
		return tag;

	if (!(data->gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

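		/*
		 * We may have been migrated to a different CPU while asleep,
		 * so re-map the sw/hw queues and pick up the matching bitmap
		 * and tag cache before retrying.
		 */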
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}

static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
			&data->ctx->last_tag);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	return __blk_mq_get_reserved_tag(data);
}

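/*
 * Find the first wait queue that has sleepers, starting at wake_index, and
 * record it as the new wake_index so wakeups rotate between the queues.
 */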
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			int o = atomic_read(&bt->wake_index);
			if (wake_index != o)
				atomic_cmpxchg(&bt->wake_index, o, wake_index);

			return bs;
		}

		wake_index = bt_index_inc(wake_index);
	}

	return NULL;
}

static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;
	int wait_cnt;

	/*
	 * The unlock memory barrier needs to order access to the request in
	 * the free path against clearing the tag bit
	 */
	clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	bs = bt_wake_ptr(bt);
	if (!bs)
		return;

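	/*
	 * Only wake sleepers every 'wake_cnt' tag frees ("rolling wakeups"),
	 * rather than on every single free.
	 */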
	wait_cnt = atomic_dec_return(&bs->wait_cnt);
	if (wait_cnt == 0) {
wake:
		atomic_add(bt->wake_cnt, &bs->wait_cnt);
		bt_index_atomic_inc(&bt->wake_index);
		wake_up(&bs->wait);
	} else if (wait_cnt < 0) {
		wait_cnt = atomic_inc_return(&bs->wait_cnt);
		if (!wait_cnt)
			goto wake;
	}
}

static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	BUG_ON(tag >= tags->nr_tags);

	bt_clear_tag(&tags->bitmap_tags, tag);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
				      unsigned int tag)
{
	BUG_ON(tag >= tags->nr_reserved_tags);

	bt_clear_tag(&tags->breserved_tags, tag);
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		__blk_mq_put_tag(tags, real_tag);
		*last_tag = real_tag;
	} else
		__blk_mq_put_reserved_tag(tags, tag);
}

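/*
 * Set a bit in 'free_map' for every currently free tag, using 'off' as the
 * starting tag offset for this bitmap.
 */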
static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
			     unsigned long *free_map, unsigned int off)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int bit = 0;

		do {
			bit = find_next_zero_bit(&bm->word, bm->depth, bit);
			if (bit >= bm->depth)
				break;

			__set_bit(bit + off, free_map);
			bit++;
		} while (1);

		off += (1 << bt->bits_per_word);
	}
}

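/*
 * Build a scratch bitmap with a bit set for each currently free tag
 * (reserved tags in the low bits, normal tags above them) and hand it to
 * the caller's callback.
 */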
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
			  void (*fn)(void *, unsigned long *), void *data)
{
	unsigned long *tag_map;
	size_t map_size;

	map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		return;

	bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
	if (tags->nr_reserved_tags)
		bt_for_each_free(&tags->breserved_tags, tag_map, 0);

	fn(data, tag_map);
	kfree(tag_map);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);

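/*
 * Count the set (allocated) bits in every word and subtract from the total
 * depth to get the number of unused tags.
 */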
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}

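/*
 * Distribute 'depth' tags over the per-word maps and size the wakeup batch:
 * capped at BT_WAIT_BATCH and at roughly a quarter of the depth, but never
 * less than 1.
 */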
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / 4)
		bt->wake_cnt = max(1U, depth / 4);

	bt->depth = depth;
}

static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
			int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags; that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over at least a few cachelines.
		 * With fewer than 4 tags, don't bother; it's not going
		 * to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}
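		/*
		 * e.g. on a 64-bit host with depth 32 this shrinks
		 * tags_per_word from 64 down to 8, spreading the tags over
		 * 4 words (and thus 4 cachelines) instead of 1.
		 */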

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
						GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		return -ENOMEM;
	}

	bt_update_count(bt, depth);

	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		init_waitqueue_head(&bt->bs[i].wait);
		atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
	}

	return 0;
}

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}

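/*
 * Seed a software queue's last_tag cache with a random starting point, so
 * queues don't all begin their tag searches in the same bitmap word.
 */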
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * We don't need to (and can't) update reserved tags here; they
	 * remain static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags);
	return 0;
}

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}