#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
	unsigned int			cfq_leaf_weight;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};
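
/*
 * Illustrative sketch (hypothetical names, not part of this header): a
 * policy typically embeds struct blkg_policy_data at the start of its own
 * per-blkg structure and reports the full size via blkcg_policy->pd_size:
 *
 *	struct foo_group {
 *		struct blkg_policy_data	pd;	<- must come first
 *		struct blkg_stat	bytes;
 *	};
 *
 *	static struct blkcg_policy foo_policy = {
 *		.pd_size	= sizeof(struct foo_group),
 *	};
 */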

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn	*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
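
/*
 * Illustrative sketch (hypothetical foo_policy): a policy registers itself
 * once, activates itself on each request_queue it manages, and
 * deactivates/unregisters on teardown, roughly:
 *
 *	ret = blkcg_policy_register(&foo_policy);
 *	...
 *	ret = blkcg_activate_policy(q, &foo_policy);
 *	...
 *	blkcg_deactivate_policy(q, &foo_policy);
 *	blkcg_policy_unregister(&foo_policy);
 */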

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);
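
/*
 * Illustrative sketch (hypothetical names): a policy's seq_file show
 * callback usually forwards to blkcg_print_blkgs() with one of the generic
 * prfill helpers and the offset of the stat inside its private data,
 * assuming a seq_css()-style cgroup seq_file, e.g.:
 *
 *	static int foo_print_bytes(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_stat, &foo_policy,
 *				  offsetof(struct foo_group, bytes), false);
 *		return 0;
 *	}
 */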

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
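
/*
 * Illustrative sketch (hypothetical names): a cgroup file write handler
 * parses "MAJ:MIN VAL" input with blkg_conf_prep(), updates its per-blkg
 * data, and releases the locks and disk reference with blkg_conf_finish(),
 * roughly:
 *
 *	ret = blkg_conf_prep(blkcg, &foo_policy, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	pd = blkg_to_pd(ctx.blkg, &foo_policy);
 *	... apply ctx.v via pd ...
 *	blkg_conf_finish(&ctx);
 */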


static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, blkio_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}
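
/*
 * Illustrative sketch (hypothetical names): policies usually wrap the two
 * helpers above with container_of() conversions between struct
 * blkg_policy_data and their own per-blkg structure:
 *
 *	static inline struct foo_group *pd_to_foo(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_group, pd) : NULL;
 *	}
 *
 *	static inline struct foo_group *blkg_to_foo(struct blkcg_gq *blkg)
 *	{
 *		return pd_to_foo(blkg_to_pd(blkg, &foo_policy));
 *	}
 */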

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
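
/*
 * Illustrative sketch: walking @blkg and all of its descendants under RCU;
 * the cursor variable names are arbitrary.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, blkg) {
 *		... d_blkg visits blkg first, then each descendant ...
 *	}
 *	rcu_read_unlock();
 */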

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
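
/*
 * Illustrative sketch: visiting every request_list of @q, e.g. when
 * adjusting congestion limits; the caller must hold the queue lock.
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q) {
 *		... examine or update rl ...
 *	}
 */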

static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}
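
/*
 * Illustrative sketch (hypothetical foo_group from above): charging an IO
 * in a policy's dispatch path, with the required serialization typically
 * provided by the queue lock:
 *
 *	blkg_stat_add(&foo->bytes, bio->bi_iter.bi_size);
 */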

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
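
/*
 * Illustrative sketch (hypothetical stat names): accounting one bio in a
 * blkg_rwstat, letting the bio's rw flags select the READ/WRITE and
 * SYNC/ASYNC buckets:
 *
 *	blkg_rwstat_add(&foo->serviced, bio->bi_rw, 1);
 *	blkg_rwstat_add(&foo->service_bytes, bio->bi_rw, bio->bi_iter.bi_size);
 */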

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */