/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(pd);
	}

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* invoke per-policy init */
		if (blkcg_policy_enabled(blkg->q, pol))
			pol->pd_init_fn(blkg);
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that we
	 * may not be holding queue_lock and thus are not sure whether
	 * @blkg from blkg_tree has already been removed or not, so we
	 * can't update hint to the lookup result.  Leave it to the caller.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q)
		return blkg;

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
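
/*
 * Illustrative usage sketch (not part of this file): an IO-path caller
 * pairs blkg_lookup() with an RCU read lock.  This assumes the
 * bio_blkcg() accessor from blk-cgroup.h; do_something() is a stand-in
 * for hypothetical policy work.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg)
 *		do_something(blkg);
 *	rcu_read_unlock();
 */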

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
					     struct request_queue *q,
					     struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* lookup and update hint on success, see __blkg_lookup() for details */
	blkg = __blkg_lookup(blkcg, q);
	if (blkg) {
		rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto out_free;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css)) {
		blkg = ERR_PTR(-EINVAL);
		goto out_free;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			blkg = ERR_PTR(-ENOMEM);
			goto out_put;
		}
	}
	blkg = new_blkg;

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);
	}
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	blkg = ERR_PTR(ret);
out_put:
	css_put(&blkcg->css);
out_free:
	blkg_free(new_blkg);
	return blkg;
}

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	/*
	 * This could be the first entry point of the blkcg implementation
	 * and we shouldn't allow anything to go through for a bypassing
	 * queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
	return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in an RCU manner.  But having an RCU lock does
	 * not mean that one can access all the fields of blkg and assume
	 * these are valid.  For example, don't try to follow throtl_data
	 * and request queue links.
	 *
	 * Having a reference to blkg under an RCU read lock allows access
	 * only to values local to groups like group stats and group rate
	 * limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}
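
/*
 * Minimal usage sketch: the blk_queue_for_each_rl() iterator in blk.h
 * wraps this helper, starting from @q->root_rl.  Assuming that macro, a
 * caller wanting to poke every request_list on a queue (say, to wake up
 * waiters after a limit change) could do:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		wake_up_all(&rl->wait[BLK_RW_SYNC]);
 */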

static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
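
/*
 * Illustrative policy-side caller (names hypothetical): a
 * cftype->read_seq_string method printing a per-device blkg_stat would
 * look roughly like
 *
 *	static int foo_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *				  struct seq_file *sf)
 *	{
 *		blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), blkg_prfill_stat,
 *				  &blkcg_policy_foo, cft->private, false);
 *		return 0;
 *	}
 *
 * with cft->private set to the offsetof() of the blkg_stat inside the
 * policy's pd struct.
 */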

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
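
/*
 * Illustrative calling pattern (policy and field names hypothetical): a
 * per-device knob's write method brackets its update with the pair above:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	blkg_to_foo(ctx.blkg)->limit = ctx.v;	(under RCU + queue lock)
 *
 *	blkg_conf_finish(&ctx);
 *	return 0;
 */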

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.  blkgs
 * should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock
 * dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_pre_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkcg_create,
	.can_attach = blkcg_can_attach,
	.pre_destroy = blkcg_pre_destroy,
	.destroy = blkcg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkcg_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registerations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;
	bool preloaded;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* preallocations for root blkg */
	blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	blk_queue_bypass_start(q);

	/* make sure the root blkg exists and count the existing blkgs */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
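
/*
 * Usage sketch (hypothetical policy): an elevator or driver init path
 * would typically do
 *
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *
 * after which every existing and future blkg on @q carries the policy's
 * data, reachable as blkg->pd[blkcg_policy_foo.plid].
 */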

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
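
/*
 * Putting it together - an illustrative (hypothetical) policy module.
 * foo_grp, foo_files and the pd callbacks are stand-ins, not real code:
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_grp),
 *		.cftypes	= foo_files,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_exit_fn	= foo_pd_exit,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_foo);
 *	}
 */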