/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);			\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next)
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip, op, regs);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag between the call to clear_ftrace_function
 * and the time tracing actually stops.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

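/* Mark a control (per-cpu) ops disabled on every possible CPU */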
static void control_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	control_ops_disable_all(ops);
	return 0;
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);
#else
static inline void update_function_graph_func(void) { }
#endif

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	if (ftrace_ops_list == &ftrace_list_end ||
	    (ftrace_ops_list->next == &ftrace_list_end &&
	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
	     !FTRACE_FORCE_LIST_FUNC)) {
		/* Set the ftrace_ops that the arch callback uses */
		set_function_trace_op = ftrace_ops_list;
		func = ftrace_ops_list->func;
	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

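/* Tell callers whether the current trace callback is the generic list function */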
int using_ftrace_ops_list_func(void)
{
	return ftrace_trace_function == ftrace_ops_list_func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

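/*
 * Add ops to a sub-list (such as the control list). If the sub-list was
 * empty, also add main_ops to ftrace_ops_list so the sub-list gets called.
 */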
static void add_ftrace_list_ops(struct ftrace_ops **list,
				struct ftrace_ops *main_ops,
				struct ftrace_ops *ops)
{
	int first = *list == &ftrace_list_end;
	add_ftrace_ops(list, ops);
	if (first)
		add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
				  struct ftrace_ops *main_ops,
				  struct ftrace_ops *ops)
{
	int ret = remove_ftrace_ops(list, ops);
	if (!ret && *list == &ftrace_list_end)
		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
	return ret;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		if (control_ops_alloc(ops))
			return -ENOMEM;
		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		ret = remove_ftrace_list_ops(&ftrace_control_list,
					     &control_ops, ops);
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static void ftrace_update_pid_func(void)
{
	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

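/* Stat iterator callbacks: walk the profile records stored in the per-cpu pages */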
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not using function graph, compare against the hit count */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg             s^2\n"
		      "  --------                               "
		   "---    ----            ---             ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	/* Sample variance (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only by 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide by 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

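/* Set up (or reset) the profiling hash and pages for every possible CPU */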
static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

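/* Interrupts must be disabled; adds the record to the per-cpu hash */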
static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated; this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

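/* Function graph profiler hooks: count calls on entry, accumulate run time on return */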
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0, NULL, NULL);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zeroed, ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

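/* Report the current ftrace_profile_enabled value (0 or 1) */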
static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent; if something goes
			 * wrong we still do not free the memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct list_head	free_list;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED,
};

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

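/* Both a NULL hash and a hash with zero entries are treated as empty */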
static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry