/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif
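
/*
 * Illustrative usage sketch (not part of this file): callers normally go
 * through the wrappers in <linux/percpu_counter.h> and pair init with
 * destroy.  "nr_items" below is a hypothetical counter used only for this
 * example; the inc/add calls are usually CPU-local and cheap.
 *
 *	static struct percpu_counter nr_items;
 *
 *	int example_setup(void)
 *	{
 *		return percpu_counter_init(&nr_items, 0);
 *	}
 *
 *	void example_update(void)
 *	{
 *		percpu_counter_inc(&nr_items);
 *		percpu_counter_add(&nr_items, 42);
 *		pr_info("approx %lld, exact %lld\n",
 *			(long long)percpu_counter_read(&nr_items),
 *			(long long)percpu_counter_sum(&nr_items));
 *	}
 *
 *	void example_teardown(void)
 *	{
 *		percpu_counter_destroy(&nr_items);
 *	}
 */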

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;
		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);
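
/*
 * Worked example of the batching above (illustrative, batch == 32): a CPU
 * calling __percpu_counter_add(fbc, 10, 32) three times accumulates 10, 20,
 * then 30 in its local s32 slot without ever taking fbc->lock.  The fourth
 * call computes count == 40 >= batch, so it takes the lock once, adds the
 * whole 40 to fbc->count and leaves the local slot at zero again.
 */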

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
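
/*
 * Accuracy sketch (illustrative): with 4 online CPUs and
 * percpu_counter_batch == 32, a plain percpu_counter_read() can deviate
 * from the precise value by up to roughly 4 * 32, so code that enforces a
 * hard limit near that margin uses percpu_counter_sum() or
 * percpu_counter_compare() below instead.
 */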

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key)
{
	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock(&percpu_counters_lock);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
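
/*
 * Illustrative error handling (not from this file): __percpu_counter_init()
 * can fail with -ENOMEM, so callers check the return value and only destroy
 * counters that were actually initialised, e.g.
 *
 *	err = percpu_counter_init(&foo->events, 0);
 *	if (err)
 *		goto out_free_foo;
 *
 * "foo->events" and the label are hypothetical.
 */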

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	spin_unlock(&percpu_counters_lock);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static void compute_batch_value(void)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
}
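
/*
 * Example values (illustrative): with 8 online CPUs, max(32, 8 * 2) keeps
 * the default batch of 32; with 64 CPUs it grows to 128, trading read
 * accuracy for fewer contended updates of fbc->count.
 */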

static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu;
	struct percpu_counter *fbc;

	compute_batch_value();
	if (action != CPU_DEAD)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	spin_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		raw_spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	}
	spin_unlock(&percpu_counters_lock);
#endif
	return NOTIFY_OK;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less
 */
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	s64	count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(percpu_counter_compare);
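
/*
 * Illustrative quota check using percpu_counter_compare() ("used" and
 * "limit" are hypothetical, not from this file):
 *
 *	if (percpu_counter_compare(&used, limit) >= 0)
 *		return -ENOSPC;
 *	percpu_counter_inc(&used);
 */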

static int __init percpu_counter_startup(void)
{
	compute_batch_value();
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);