#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*pd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than the size of pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent
 * from any request_queue associated to it must specify its size
 * with the cpd_size field of the blkcg_policy structure and
 * embed a blkcg_policy_data in it.  cpd_init() is invoked to let
 * each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the policy id this per-policy data belongs to */
	int				plid;
};

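/*
 * Example (illustrative sketch only): a policy would typically wrap the
 * per-blkg and per-blkcg data like this; the "foo" names are hypothetical.
 *
 *	struct foo_group_data {
 *		struct blkg_policy_data	pd;	// must be first, see above
 *		uint64_t		bytes_dispatched;
 *	};
 *
 *	struct foo_cgroup_data {
 *		struct blkcg_policy_data cpd;
 *		unsigned int		weight;
 *	};
 *
 *	static struct foo_group_data *blkg_to_foo(struct blkcg_gq *blkg)
 *	{
 *		return container_of(blkg_to_pd(blkg, &blkcg_policy_foo),
 *				    struct foo_group_data, pd);
 *	}
 */
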
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* policy specific per-blkcg data size */
	size_t				cpd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

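/*
 * Example (illustrative sketch only): a minimal policy descriptor; the
 * foo_* callbacks, foo_files and struct foo_* types are hypothetical.
 * ->plid is assigned by blkcg_policy_register().
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_group_data),
 *		.cpd_size	= sizeof(struct foo_cgroup_data),
 *		.cftypes	= foo_files,
 *		.cpd_init_fn	= foo_cpd_init,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_offline_fn	= foo_pd_offline,
 *		.pd_exit_fn	= foo_pd_exit,
 *		.pd_reset_stats_fn = foo_pd_reset_stats,
 *	};
 */
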
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

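/*
 * Example (illustrative sketch only): typical registration at module init
 * followed by per-queue activation; foo_init() and blkcg_policy_foo are
 * hypothetical.
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	// later, when the policy is enabled on a specific request_queue
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *	if (ret)
 *		return ret;
 *	...
 *	blkcg_deactivate_policy(q, &blkcg_policy_foo);
 */
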
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

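/*
 * Example (illustrative sketch only): dumping a per-blkg blkg_stat through
 * blkcg_print_blkgs(); foo_print_bytes(), blkcg_policy_foo and the
 * stat_bytes field are hypothetical.
 *
 *	static int foo_print_bytes(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_stat, &blkcg_policy_foo,
 *				  offsetof(struct foo_group_data, stat_bytes),
 *				  true);
 *		return 0;
 *	}
 */
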
u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);


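/*
 * Example (illustrative sketch only): a policy's per-device config write
 * handler; foo_set_weight() and blkcg_policy_foo are hypothetical.
 *
 *	static ssize_t foo_set_weight(struct kernfs_open_file *of, char *buf,
 *				      size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		// ctx.blkg and ctx.v are valid here, queue_lock is held
 *		...
 *		blkg_conf_finish(&ctx);
 *		return nbytes;
 *	}
 */
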
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, blkio_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

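/*
 * Example (illustrative sketch only): policies typically use blkg_path()
 * for logging.
 *
 *	char path[128];
 *
 *	blkg_path(blkg, path, sizeof(path));
 *	pr_debug("servicing blkg %s\n", path);
 */
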
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

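/*
 * Example (illustrative sketch only): visiting every descendant of a
 * hypothetical @parent_blkg, which must remain valid across the walk.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, parent_blkg) {
 *		// d_blkg is parent_blkg itself on the first iteration
 *		...
 *	}
 *	rcu_read_unlock();
 */
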
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

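/*
 * Example (illustrative sketch only): roughly how the request allocation
 * path pairs these helpers; alloc_request() stands in for the real
 * allocation step.
 *
 *	struct request_list *rl;
 *
 *	rl = blk_get_rl(q, bio);		// queue_lock held
 *	rq = alloc_request(rl, ...);
 *	if (!rq) {
 *		blk_put_rl(rl);
 *		return NULL;
 *	}
 *	blk_rq_set_rl(rq, rl);
 *	...
 *	blk_put_rl(blk_rq_rl(rq));		// when the request is freed
 */
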
static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

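/*
 * Example (illustrative sketch only): maintaining and reading a policy
 * private statistic; foo_pd and its stat_bytes field are hypothetical.
 *
 *	blkg_stat_init(&foo_pd->stat_bytes);		// at pd init time
 *	blkg_stat_add(&foo_pd->stat_bytes, bio->bi_iter.bi_size);
 *	seq_printf(sf, "%llu\n", blkg_stat_read(&foo_pd->stat_bytes));
 */
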
static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

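/*
 * Example (illustrative sketch only): accounting an IO and reporting the
 * combined total; foo_pd->served is a hypothetical struct blkg_rwstat.
 *
 *	blkg_rwstat_add(&foo_pd->served, bio->bi_rw, bio->bi_iter.bi_size);
 *	...
 *	seq_printf(sf, "%llu\n", blkg_rwstat_total(&foo_pd->served));
 */
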
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */