perf_counter.c 109 KB
Newer Older
1
2
3
/*
 * Performance counter core code
 *
4
5
6
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7
 *  Copyright    2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8
9
 *
 *  For licensing details see kernel-base/COPYING
10
11
12
 */

#include <linux/fs.h>
13
#include <linux/mm.h>
14
15
#include <linux/cpu.h>
#include <linux/smp.h>
16
#include <linux/file.h>
17
18
#include <linux/poll.h>
#include <linux/sysfs.h>
19
#include <linux/dcache.h>
20
#include <linux/percpu.h>
21
#include <linux/ptrace.h>
22
23
24
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
25
26
27
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
28
#include <linux/kernel_stat.h>
29
30
#include <linux/perf_counter.h>

31
32
#include <asm/irq_regs.h>

33
34
35
36
37
/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

38
int perf_max_counters __read_mostly = 1;
39
40
41
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

42
static atomic_t nr_counters __read_mostly;
Peter Zijlstra's avatar
Peter Zijlstra committed
43
44
static atomic_t nr_mmap_counters __read_mostly;
static atomic_t nr_comm_counters __read_mostly;
45
static atomic_t nr_task_counters __read_mostly;
46

47
/*
48
49
50
51
 * perf counter paranoia level:
 *  0 - not paranoid
 *  1 - disallow cpu counters to unpriv
 *  2 - disallow kernel profiling to unpriv
52
 */
53
int sysctl_perf_counter_paranoid __read_mostly;
54
55
56
57
58
59
60
61
62
63
64

/* Paranoia level 1+: unprivileged users may not open CPU-wide counters. */
static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_counter_paranoid >= 1;
}

/* Paranoia level 2+: unprivileged users may not profile the kernel. */
static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_counter_paranoid >= 2;
}

65
int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
66
67
68
69
70

/*
 * max perf counter sample rate
 */
int sysctl_perf_counter_sample_rate __read_mostly = 100000;
71

72
73
static atomic64_t perf_counter_id;

74
/*
75
 * Lock for (sysadmin-configurable) counter reservations:
76
 */
77
static DEFINE_SPINLOCK(perf_resource_lock);
78
79
80
81

/*
 * Architecture provided APIs - weak aliases:
 */
82
extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
83
{
84
	return NULL;
85
86
}

87
88
89
void __weak hw_perf_disable(void)		{ barrier(); }
void __weak hw_perf_enable(void)		{ barrier(); }

90
void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
91
92
93

/*
 * Arch hook to schedule a whole counter group atomically; the default
 * (returning 0) makes the core fall back to per-counter scheduling.
 */
int __weak
hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}
99

100
101
void __weak perf_counter_print_debug(void)	{ }

102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
/*
 * Per-cpu nesting counter for perf_disable()/perf_enable() pairs:
 * the hardware is only re-enabled when the outermost perf_enable()
 * brings the count back to zero.
 */
static DEFINE_PER_CPU(int, disable_count);

void __perf_disable(void)
{
	++__get_cpu_var(disable_count);
}

bool __perf_enable(void)
{
	/* True when this was the outermost disable. */
	return --__get_cpu_var(disable_count) == 0;
}

void perf_disable(void)
{
	__perf_disable();
	hw_perf_disable();
}

void perf_enable(void)
{
	if (__perf_enable())
		hw_perf_enable();
}

126
127
static void get_ctx(struct perf_counter_context *ctx)
{
128
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
129
130
}

131
132
133
134
135
136
137
138
/* RCU callback: actually free a context once readers are done with it. */
static void free_ctx(struct rcu_head *head)
{
	struct perf_counter_context *ctx =
		container_of(head, struct perf_counter_context, rcu_head);

	kfree(ctx);
}

139
140
static void put_ctx(struct perf_counter_context *ctx)
{
141
142
143
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
144
145
146
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
147
	}
148
149
}

150
151
152
153
154
155
156
157
/* Detach a cloned context from its parent, dropping the parent ref. */
static void unclone_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter_context *parent = ctx->parent_ctx;

	if (!parent)
		return;
	ctx->parent_ctx = NULL;
	put_ctx(parent);
}

158
159
160
161
162
163
164
165
166
167
168
169
170
171
/*
 * If we inherit counters we want to return the parent counter id
 * to userspace.
 */
static u64 primary_counter_id(struct perf_counter *counter)
{
	return counter->parent ? counter->parent->id : counter->id;
}

172
173
174
175
176
/*
 * Get the perf_counter_context for a task and lock it.
 * This has to cope with with the fact that until it is locked,
 * the context could get moved to another task.
 */
177
178
static struct perf_counter_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
{
	struct perf_counter_context *ctx;

	rcu_read_lock();
 retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_counter_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}
201
202
203
204
205

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
{
	unsigned long flags;
	struct perf_counter_context *ctx = perf_lock_task_context(task, &flags);

	if (ctx) {
		++ctx->pin_count;
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

/* Undo perf_pin_task_context(): drop the pin and the reference. */
static void perf_unpin_context(struct perf_counter_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}

239
240
241
242
/*
 * Add a counter from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
243
244
245
246
247
248
249
250
251
252
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
Peter Zijlstra's avatar
Peter Zijlstra committed
253
	if (group_leader == counter)
254
		list_add_tail(&counter->list_entry, &ctx->counter_list);
Peter Zijlstra's avatar
Peter Zijlstra committed
255
	else {
256
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
Peter Zijlstra's avatar
Peter Zijlstra committed
257
258
		group_leader->nr_siblings++;
	}
259
260

	list_add_rcu(&counter->event_entry, &ctx->event_list);
261
	ctx->nr_counters++;
262
263
	if (counter->attr.inherit_stat)
		ctx->nr_stat++;
264
265
}

266
267
/*
 * Remove a counter from the lists for its context.
268
 * Must be called with ctx->mutex and ctx->lock held.
269
 */
270
271
272
273
274
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

275
276
	if (list_empty(&counter->list_entry))
		return;
277
	ctx->nr_counters--;
278
279
	if (counter->attr.inherit_stat)
		ctx->nr_stat--;
280

281
	list_del_init(&counter->list_entry);
282
	list_del_rcu(&counter->event_entry);
283

Peter Zijlstra's avatar
Peter Zijlstra committed
284
285
286
	if (counter->group_leader != counter)
		counter->group_leader->nr_siblings--;

287
288
289
290
291
292
293
294
	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

295
		list_move_tail(&sibling->list_entry, &ctx->counter_list);
296
297
298
299
		sibling->group_leader = sibling;
	}
}

300
301
302
303
304
305
306
307
308
/*
 * Take one active counter off the PMU and account the stop time.
 * Must be called with ctx->lock held.
 */
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	/* Mark inactive and stamp the stop time before touching the pmu. */
	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_stopped = ctx->time;
	counter->pmu->disable(counter);
	counter->oncpu = -1;

	/* Software counters don't occupy hardware slots. */
	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

338
	if (group_counter->attr.exclusive)
339
340
341
		cpuctx->exclusive = 0;
}

342
343
344
345
346
347
/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
348
static void __perf_counter_remove_from_context(void *info)
349
350
351
352
353
354
355
356
357
358
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
359
	if (ctx->task && cpuctx->task_ctx != ctx)
360
361
		return;

362
	spin_lock(&ctx->lock);
363
364
365
366
367
	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level.
	 */
	perf_disable();
368

369
370
	counter_sched_out(counter, cpuctx, ctx);

371
	list_del_counter(counter, ctx);
372
373
374
375
376
377
378
379
380
381
382

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

383
	perf_enable();
384
	spin_unlock(&ctx->lock);
385
386
387
388
389
390
}


/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
391
 * Must be called with ctx->mutex held.
392
393
394
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
395
396
397
398
399
400
401
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_counter_exit_task, it's OK because the
 * context has been detached from its task.
402
 */
403
static void perf_counter_remove_from_context(struct perf_counter *counter)
404
405
406
407
408
409
410
411
412
413
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always sucessful.
		 */
		smp_call_function_single(counter->cpu,
414
					 __perf_counter_remove_from_context,
415
416
417
418
419
					 counter, 1);
		return;
	}

retry:
420
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
421
422
423
424
425
426
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
427
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
428
429
430
431
432
433
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
434
	 * can remove the counter safely, if the call above did not
435
436
	 * succeed.
	 */
437
438
	if (!list_empty(&counter->list_entry)) {
		list_del_counter(counter, ctx);
439
440
441
442
	}
	spin_unlock_irq(&ctx->lock);
}

443
/* Timestamp source used for all counter time accounting. */
static inline u64 perf_clock(void)
{
	return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
451
static void update_context_time(struct perf_counter_context *ctx)
452
{
453
454
455
456
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
457
458
459
460
461
462
463
464
465
466
}

/*
 * Update the total_time_enabled and total_time_running fields for a counter.
 */
static void update_counter_times(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	u64 run_end;

	/* OFF/ERROR counters keep their last-accounted values. */
	if (counter->state < PERF_COUNTER_STATE_INACTIVE)
		return;

	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;

	/* An inactive counter stopped running at tstamp_stopped. */
	run_end = (counter->state == PERF_COUNTER_STATE_INACTIVE) ?
		counter->tstamp_stopped : ctx->time;

	counter->total_time_running = run_end - counter->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all counters in a group.
 */
static void update_group_times(struct perf_counter *leader)
{
	struct perf_counter *sibling;

	update_counter_times(leader);
	list_for_each_entry(sibling, &leader->sibling_list, list_entry)
		update_counter_times(sibling);
}

492
493
494
495
496
497
498
499
500
501
502
503
504
/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
505
	if (ctx->task && cpuctx->task_ctx != ctx)
506
507
		return;

508
	spin_lock(&ctx->lock);
509
510
511
512
513
514

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
515
		update_context_time(ctx);
516
		update_counter_times(counter);
517
518
519
520
521
522
523
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

524
	spin_unlock(&ctx->lock);
525
526
527
528
}

/*
 * Disable a counter.
529
530
531
532
533
534
535
536
537
538
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisifed when called through
 * perf_counter_for_each_child or perf_counter_for_each because they
 * hold the top-level counter's child_mutex, so any descendant that
 * goes to exit will block in sync_child_counter.
 * When called from perf_pending_counter it's OK because counter->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_counter_task_sched_out for this context.
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

 retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
570
571
	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
572
		counter->state = PERF_COUNTER_STATE_OFF;
573
	}
574
575
576
577

	spin_unlock_irq(&ctx->lock);
}

578
579
580
581
582
583
static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
584
	if (counter->state <= PERF_COUNTER_STATE_OFF)
585
586
587
588
589
590
591
592
593
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

594
	if (counter->pmu->enable(counter)) {
595
596
597
598
599
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

600
	counter->tstamp_running += ctx->time - counter->tstamp_stopped;
601

602
603
	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
604
605
	ctx->nr_active++;

606
	if (counter->attr.exclusive)
607
608
		cpuctx->exclusive = 1;

609
610
611
	return 0;
}

612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

658
659
660
661
662
663
664
665
666
667
/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;
Peter Zijlstra's avatar
Peter Zijlstra committed
668

669
670
671
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;
Peter Zijlstra's avatar
Peter Zijlstra committed
672

673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
698
	if (counter->attr.exclusive && cpuctx->active_oncpu)
699
700
701
702
703
704
705
706
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

707
708
709
710
/* Link a counter into a context and initialize its timestamps. */
static void add_counter_to_ctx(struct perf_counter *counter,
			       struct perf_counter_context *ctx)
{
	list_add_counter(counter, ctx);
	counter->tstamp_enabled = ctx->time;
	counter->tstamp_running = ctx->time;
	counter->tstamp_stopped = ctx->time;
}

716
/*
717
 * Cross CPU call to install and enable a performance counter
718
719
 *
 * Must be called with ctx->mutex held
720
721
722
723
724
725
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
726
	struct perf_counter *leader = counter->group_leader;
727
	int cpu = smp_processor_id();
728
	int err;
729
730
731
732
733

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
734
735
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no counters.
736
	 */
737
	if (ctx->task && cpuctx->task_ctx != ctx) {
738
		if (cpuctx->task_ctx || ctx->task != current)
739
740
741
			return;
		cpuctx->task_ctx = ctx;
	}
742

743
	spin_lock(&ctx->lock);
744
	ctx->is_active = 1;
745
	update_context_time(ctx);
746
747
748
749
750

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
751
	perf_disable();
752

753
	add_counter_to_ctx(counter, ctx);
754

755
756
757
758
759
760
761
762
	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

763
764
765
766
767
	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
768
	if (!group_can_go_on(counter, cpuctx, 1))
769
770
771
772
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

773
774
775
776
777
778
779
780
	if (err) {
		/*
		 * This counter couldn't go on.  If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
781
		if (leader->attr.pinned) {
782
			update_group_times(leader);
783
			leader->state = PERF_COUNTER_STATE_ERROR;
784
		}
785
	}
786

787
	if (!err && !ctx->task && cpuctx->max_pertask)
788
789
		cpuctx->max_pertask--;

790
 unlock:
791
	perf_enable();
792

793
	spin_unlock(&ctx->lock);
794
795
796
797
798
799
800
801
802
803
804
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
805
806
 *
 * Must be called with ctx->mutex held.
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always sucessful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * we need to retry the smp call.
	 */
833
	if (ctx->is_active && list_empty(&counter->list_entry)) {
834
835
836
837
838
839
840
841
842
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can add the counter safely, if it the call above did not
	 * succeed.
	 */
843
844
	if (list_empty(&counter->list_entry))
		add_counter_to_ctx(counter, ctx);
845
846
847
	spin_unlock_irq(&ctx->lock);
}

848
849
850
851
/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
852
{
853
854
855
856
857
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int err;
858

859
860
861
862
	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
863
	if (ctx->task && cpuctx->task_ctx != ctx) {
864
		if (cpuctx->task_ctx || ctx->task != current)
865
866
867
			return;
		cpuctx->task_ctx = ctx;
	}
868

869
	spin_lock(&ctx->lock);
870
	ctx->is_active = 1;
871
	update_context_time(ctx);
872
873
874
875

	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;
876
	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
877
878

	/*
879
880
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
881
	 */
882
883
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;
884

885
	if (!group_can_go_on(counter, cpuctx, 1)) {
886
		err = -EEXIST;
887
	} else {
888
		perf_disable();
889
890
891
892
893
894
		if (counter == leader)
			err = group_sched_in(counter, cpuctx, ctx,
					     smp_processor_id());
		else
			err = counter_sched_in(counter, cpuctx, ctx,
					       smp_processor_id());
895
		perf_enable();
896
	}
897
898
899
900
901
902
903
904

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
905
		if (leader->attr.pinned) {
906
			update_group_times(leader);
907
			leader->state = PERF_COUNTER_STATE_ERROR;
908
		}
909
910
911
	}

 unlock:
912
	spin_unlock(&ctx->lock);
913
914
915
916
}

/*
 * Enable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each as described
 * for perf_counter_disable.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Counter is bound to a cpu: enable it there.
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * Clear any error state first.  That way, if we see the counter
	 * in error state below, we know that it has gone back into
	 * error state, as distinct from the task having been scheduled
	 * away before the cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

 retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * Context active but counter still off: the cross-call missed,
	 * try again.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * With ctx->lock held the context can't be scheduled in, so
	 * flipping the state here is safe.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->tstamp_enabled =
			ctx->time - counter->total_time_enabled;
	}

 out:
	spin_unlock_irq(&ctx->lock);
}

978
static int perf_counter_refresh(struct perf_counter *counter, int refresh)
979
{
980
981
982
	/*
	 * not supported on inherited counters
	 */
983
	if (counter->attr.inherit)
984
985
		return -EINVAL;

986
987
	atomic_add(refresh, &counter->event_limit);
	perf_counter_enable(counter);
988
989

	return 0;
990
991
}

992
993
994
995
996
void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;

997
998
	spin_lock(&ctx->lock);
	ctx->is_active = 0;
999
	if (likely(!ctx->nr_counters))
1000
		goto out;
1001
	update_context_time(ctx);
1002

1003
	perf_disable();
1004
	if (ctx->nr_active) {
1005
1006
1007
1008
1009
1010
		list_for_each_entry(counter, &ctx->counter_list, list_entry) {
			if (counter != counter->group_leader)
				counter_sched_out(counter, cpuctx, ctx);
			else
				group_sched_out(counter, cpuctx, ctx);
		}
1011
	}
1012
	perf_enable();
1013
 out:
1014
1015
1016
	spin_unlock(&ctx->lock);
}

1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled counters.
 * If the number of enabled counters is the same, then the set
 * of enabled counters should be the same, because these are both
 * inherited contexts, therefore we can't access individual counters
 * in them directly with an fd; we can only enable/disable all
 * counters via prctl, or enable/disable all counters in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_counter_context *ctx1,
			 struct perf_counter_context *ctx2)
{
	if (!ctx1->parent_ctx || ctx1->parent_ctx != ctx2->parent_ctx)
		return 0;
	if (ctx1->parent_gen != ctx2->parent_gen)
		return 0;
	/* Pinned contexts must not be swapped. */
	return !ctx1->pin_count && !ctx2->pin_count;
}

1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
static void __perf_counter_read(void *counter);

/*
 * Swap the accumulated values of a counter pair when their contexts
 * are flipped, so per-task stats stay attached to the right task.
 */
static void __perf_counter_sync_stat(struct perf_counter *counter,
				     struct perf_counter *next_counter)
{
	u64 value;

	if (!counter->attr.inherit_stat)
		return;

	/*
	 * Update the counter value.  We cannot use perf_counter_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single().  However
	 * we know the counter must be on the current CPU, therefore we
	 * don't need it.
	 */
	switch (counter->state) {
	case PERF_COUNTER_STATE_ACTIVE:
		__perf_counter_read(counter);
		break;

	case PERF_COUNTER_STATE_INACTIVE:
		update_counter_times(counter);
		break;

	default:
		break;
	}

	/*
	 * Flip the counter values between the two contexts so that
	 * per-task stats stay reliable across the switch.
	 */
	value = atomic64_read(&next_counter->count);
	value = atomic64_xchg(&counter->count, value);
	atomic64_set(&next_counter->count, value);

	swap(counter->total_time_enabled, next_counter->total_time_enabled);
	swap(counter->total_time_running, next_counter->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_counter_update_userpage(counter);
	perf_counter_update_userpage(next_counter);
}

/* Return the entry following @pos in its list, via list member @member. */
#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

/*
 * Walk the event lists of two clone-equivalent contexts in lockstep and
 * sync the statistics of each pair of counters.  Both lists were cloned
 * from the same parent, so they pair up positionally.
 */
static void perf_counter_sync_stat(struct perf_counter_context *ctx,
				   struct perf_counter_context *next_ctx)
{
	struct perf_counter *counter, *next_counter;

	/* Nothing to do unless some counter requested inherit_stat. */
	if (!ctx->nr_stat)
		return;

	counter = list_first_entry(&ctx->event_list,
				   struct perf_counter, event_entry);

	next_counter = list_first_entry(&next_ctx->event_list,
					struct perf_counter, event_entry);

	while (&counter->event_entry != &ctx->event_list &&
	       &next_counter->event_entry != &next_ctx->event_list) {

		__perf_counter_sync_stat(counter, next_counter);

		counter = list_next_entry(counter, event_entry);
		next_counter = list_next_entry(next_counter, event_entry);
	}
}

/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
Ingo Molnar's avatar
Ingo Molnar committed
1117
 * This does not protect us against NMI, but disable()
1118
1119
1120
1121
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * not restart the counter.
 */
1122
1123
void perf_counter_task_sched_out(struct task_struct *task,
				 struct task_struct *next, int cpu)
1124
1125
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1126
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
1127
	struct perf_counter_context *next_ctx;
1128
	struct perf_counter_context *parent;
1129
	struct pt_regs *regs;
1130
	int do_switch = 1;
1131

1132
	regs = task_pt_regs(task);
1133
	perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
1134

1135
	if (likely(!ctx || !cpuctx->task_ctx))
1136
1137
		return;

1138
	update_context_time(ctx);
1139
1140
1141

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
1142
	next_ctx = next->perf_counter_ctxp;
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		spin_lock(&ctx->lock);
		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
1157
1158
1159
1160
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_counter_ctxp
			 */
1161
1162
1163
1164
1165
			task->perf_counter_ctxp = next_ctx;
			next->perf_counter_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;
1166
1167

			perf_counter_sync_stat(ctx, next_ctx);
1168
1169
1170
		}
		spin_unlock(&next_ctx->lock);
		spin_unlock(&ctx->lock);
1171
	}
1172
	rcu_read_unlock();
1173

1174
1175
1176
1177
	if (do_switch) {
		__perf_counter_sched_out(ctx, cpuctx);
		cpuctx->task_ctx = NULL;
	}
1178
1179
}

/*
 * Called with IRQs disabled
 */
1183
1184
1185
1186
static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

1187
1188
	if (!cpuctx->task_ctx)
		return;
1189
1190
1191
1192

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

1193
1194
1195
1196
	__perf_counter_sched_out(ctx, cpuctx);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
1200
static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
1201
{
1202
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
1203
1204
}

static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
1208
1209
{
	struct perf_counter *counter;
1210
	int can_add_hw = 1;
1211

1212
1213
	spin_lock(&ctx->lock);
	ctx->is_active = 1;
1214
	if (likely(!ctx->nr_counters))
1215
		goto out;
1216

1217
	ctx->timestamp = perf_clock();
1218

1219
	perf_disable();
1220
1221
1222
1223
1224
1225
1226

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
1227
		    !counter->attr.pinned)
1228
1229
1230
1231
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

1232
1233
1234
1235
1236
1237
		if (counter != counter->group_leader)
			counter_sched_in(counter, cpuctx, ctx, cpu);
		else {
			if (group_can_go_on(counter, cpuctx, 1))
				group_sched_in(counter, cpuctx, ctx, cpu);
		}
1238
1239
1240
1241
1242

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
1243
1244
		if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
			update_group_times(counter);
1245
			counter->state = PERF_COUNTER_STATE_ERROR;
1246
		}
1247
1248
	}

1249
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1250
1251
1252
1253
1254
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
1255
		    counter->attr.pinned)
1256
1257
			continue;

1258
1259
1260
1261
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
1262
1263
1264
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

1265
1266
		if (counter != counter->group_leader) {
			if (counter_sched_in(counter, cpuctx, ctx, cpu))
1267
				can_add_hw = 0;
1268
1269
1270
1271
1272
		} else {
			if (group_can_go_on(counter, cpuctx, can_add_hw)) {
				if (group_sched_in(counter, cpuctx, ctx, cpu))
					can_add_hw = 0;
			}
1273
		}
1274
	}
1275
	perf_enable();
1276
 out:
1277
	spin_unlock(&ctx->lock);
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1294
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
1295

1296
1297
	if (likely(!ctx))
		return;
1298
1299
	if (cpuctx->task_ctx == ctx)
		return;
1300
	__perf_counter_sched_in(ctx, cpuctx, cpu);
1301
1302
1303
	cpuctx->task_ctx = ctx;
}

/* Schedule in the per-CPU counter context on @cpu. */
static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	__perf_counter_sched_in(&cpuctx->ctx, cpuctx, cpu);
}

/* Sentinel interrupt count used to mark a throttled counter. */
#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_counter *counter, int enable);
/*
 * Re-estimate the sample period needed to hit the requested sample
 * frequency, given that @events events were observed over the last
 * measurement interval, and nudge hw.sample_period towards it.
 */
static void perf_adjust_period(struct perf_counter *counter, u64 events)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 target_period, new_period;
	s64 step;

	/* Period that would have produced exactly attr.sample_freq. */
	events *= hwc->sample_period;
	target_period = div64_u64(events, counter->attr.sample_freq);

	/* Only move 1/8th of the way there: a simple low-pass filter. */
	step = (s64)(target_period - hwc->sample_period);
	step = (step + 7) / 8;

	new_period = hwc->sample_period + step;

	/* A zero period would stop sampling altogether; clamp to 1. */
	if (!new_period)
		new_period = 1;

	hwc->sample_period = new_period;
}

static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1336
1337
{
	struct perf_counter *counter;
1338
	struct hw_perf_counter *hwc;
1339
	u64 interrupts, freq;
1340
1341
1342
1343
1344
1345

	spin_lock(&ctx->lock);
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_ACTIVE)
			continue;

1346
1347
1348
1349
		hwc = &counter->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;
1350