#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
 * to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate private data
 * area by allocating larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
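
/*
 * Illustrative sketch (not part of the original header): a policy that
 * needs per-blkg state embeds blkg_policy_data at the start of a larger
 * structure and converts back with container_of().  "foo_grp",
 * foo_pd_alloc() and pd_to_foo() are hypothetical names; per-blkcg state
 * embeds blkcg_policy_data the same way.
 *
 *	struct foo_grp {
 *		struct blkg_policy_data	pd;
 *		u64			bytes_issued;
 *	};
 *
 *	static struct blkg_policy_data *foo_pd_alloc(gfp_t gfp, int node)
 *	{
 *		struct foo_grp *fg = kzalloc_node(sizeof(*fg), gfp, node);
 *
 *		return fg ? &fg->pd : NULL;
 *	}
 *
 *	static struct foo_grp *pd_to_foo(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_grp, pd) : NULL;
 *	}
 */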

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated to it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate private data area by allocating larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
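
/*
 * Illustrative sketch (not part of the original header): wiring the ops
 * above into a policy descriptor and registering it.  blkcg_policy_foo and
 * the foo_* callbacks are hypothetical; plid is assigned by
 * blkcg_policy_register(), and per-queue activation is done separately
 * with blkcg_activate_policy().
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_dfl_files,
 *		.legacy_cftypes	= foo_legacy_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_foo);
 *	}
 */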

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
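
/*
 * Illustrative sketch (not part of the original header): a typical cgroup
 * file write handler parses "MAJ:MIN ..." input with blkg_conf_prep(),
 * updates its per-blkg data and then drops the queue lock and disk
 * reference with blkg_conf_finish().  foo_set_limit() and blkcg_policy_foo
 * are hypothetical.
 *
 *	static ssize_t foo_set_limit(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		(ctx.blkg is pinned and the queue lock is held here; parse
 *		 ctx.body and update blkg_to_pd(ctx.blkg, &blkcg_policy_foo))
 *
 *		blkg_conf_finish(&ctx);
 *		return nbytes;
 *	}
 */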


static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, io_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, io_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
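
/*
 * Illustrative sketch (not part of the original header): resolving the blkg
 * for a bio's blkcg and reaching the policy's private data.  The policy
 * name and foo_account() are hypothetical, and the data must be used under
 * the same RCU read lock unless a reference is taken with blkg_get().
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg)
 *		foo_account(blkg_to_pd(blkg, &blkcg_policy_foo), bio);
 *	rcu_read_unlock();
 */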

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
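
/*
 * Illustrative sketch (not part of the original header): pushing a
 * configuration change from @p_blkg down to all of its online descendants
 * while holding the RCU read lock and the queue lock.  foo_update() is
 * hypothetical.
 *
 *	struct blkcg_gq *d_blkg;
 *	struct cgroup_subsys_state *pos_css;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)
 *		foo_update(blkg_to_pd(d_blkg, &blkcg_policy_foo));
 *	rcu_read_unlock();
 */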

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}
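
/*
 * Illustrative sketch (not part of the original header): how the
 * request_list helpers pair up in the legacy request allocation path,
 * all under the queue lock.
 *
 *	struct request_list *rl = blk_get_rl(q, bio);
 *	...
 *	blk_rq_set_rl(rq, rl);			(rq was allocated from rl)
 *	...
 *	blk_put_rl(blk_rq_rl(rq));		(when rq is freed)
 */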

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	__percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}
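
/*
 * Illustrative sketch (not part of the original header): typical lifecycle
 * of a blkg_stat embedded in hypothetical per-blkg policy data "fg".
 *
 *	if (blkg_stat_init(&fg->time, gfp))	(in pd_alloc_fn)
 *		goto err;
 *	blkg_stat_add(&fg->time, delta);	(in the issue path)
 *	total = blkg_stat_read(&fg->time);	(when reporting)
 *	blkg_stat_exit(&fg->time);		(in pd_free_fn)
 */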

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	struct percpu_counter *cnt;

	if (rw & REQ_WRITE)
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);

	if (rw & REQ_SYNC)
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(atomic64_read(&v.aux_cnt[i]) +
			     atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		blkg = blkg ?: q->root_blkg;
		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
				bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
	}

	rcu_read_unlock();
	return !throtl;
}

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */