ftrace.c 55.4 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

16
17
18
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <asm/ftrace.h>

#include "trace.h"
35

36
37
38
39
40
41
42
43
44
45
46
47
/*
 * If the condition triggers, something went badly wrong inside ftrace
 * itself: emit a warning and permanently shut tracing down via
 * ftrace_kill().
 */
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

/* Same as FTRACE_WARN_ON() but only warns the first time it fires. */
#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

52
53
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
/* Previous value of ftrace_enabled, kept across sysctl updates. */
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

/* Serializes updates to the ops list and the dyn_ftrace record pages. */
static DEFINE_MUTEX(ftrace_lock);

/* Sentinel terminating the singly linked list of registered ftrace_ops. */
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

/* Head of the registered-ops list; never NULL, ends at ftrace_list_end. */
static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
/* The function actually invoked from the mcount call sites. */
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
/* Real target when ftrace_test_stop_func() is interposed (see below). */
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
/* Function called on behalf of the traced pid by ftrace_pid_func(). */
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

Ingo Molnar's avatar
Ingo Molnar committed
77
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
78
79
80
81
82
83
84
85
86
87
88
89
90
91
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

92
93
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
94
	if (!test_tsk_trace_trace(current))
95
96
97
98
99
100
101
102
103
104
105
106
		return;

	ftrace_pid_function(ip, parent_ip);
}

/* Save the function the pid wrapper should forward to. */
static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* Never point the pid handler back at itself. */
	if (func == ftrace_pid_func)
		return;

	ftrace_pid_function = func;
}

107
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag before the call sites stop
 * invoking the old function: nothing here waits for
 * in-flight callers to finish.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

120
121
122
123
124
125
126
127
128
129
130
131
132
133
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	/* Honor the quick-disable switch before forwarding the call. */
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

Ingo Molnar's avatar
Ingo Molnar committed
134
/*
 * __register_ftrace_function - link @ops into the list of tracer callbacks
 *
 * Pushes @ops onto the head of ftrace_list and, when tracing is enabled,
 * rewires the function called from the mcount sites: a single ops is
 * called directly, multiple ops go through ftrace_list_func().
 * Always returns 0.
 *
 * NOTE(review): no lock is taken here -- presumably callers serialize
 * via ftrace_lock; confirm at the call sites (not visible in this chunk).
 */
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		/* Only one ops registered: bypass the list walker. */
		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		/* pid filtering active: interpose the pid wrapper */
		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}

Ingo Molnar's avatar
Ingo Molnar committed
174
static int __unregister_ftrace_function(struct ftrace_ops *ops)
175
176
177
178
{
	struct ftrace_ops **p;

	/*
179
180
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
181
182
183
184
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
Steven Rostedt's avatar
Steven Rostedt committed
185
		return 0;
186
187
188
189
190
191
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

Steven Rostedt's avatar
Steven Rostedt committed
192
193
	if (*p != ops)
		return -1;
194
195
196

	*p = (*p)->next;

197
198
	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
199
200
201
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

Steven Rostedt's avatar
Steven Rostedt committed
202
			if (ftrace_pid_trace) {
203
204
205
206
207
208
209
210
211
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
212
	}
213

Steven Rostedt's avatar
Steven Rostedt committed
214
	return 0;
215
216
}

217
218
219
220
221
/*
 * ftrace_update_pid_func - re-resolve the trace function on pid change
 *
 * Called when the traced-pid setting changes: interposes ftrace_pid_func
 * when a pid filter is set, or unwraps back to the saved function when
 * the filter is cleared.  No-op while tracing is stopped (stub).
 */
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

	func = ftrace_trace_function;

	/*
	 * NOTE(review): under !CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST,
	 * ftrace_trace_function is the stop-check wrapper, so the value
	 * saved here is the wrapper rather than the underlying function
	 * -- verify this is intended.
	 */
	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		/* Filter cleared: restore the function the wrapper used. */
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}

241
242
243
244
/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
/* Sentinel pid value used to select the swapper (idle) task. */
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

245
#ifdef CONFIG_DYNAMIC_FTRACE
246

247
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
Steven Rostedt's avatar
Steven Rostedt committed
248
# error Dynamic ftrace depends on MCOUNT_RECORD
249
250
#endif

251
252
/* Buckets of registered function probes (iterated by t_hash_*() below). */
static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

/* One registered function probe and its callbacks. */
struct ftrace_func_probe {
	struct hlist_node	node;	/* link in ftrace_func_hash */
	struct ftrace_probe_ops	*ops;	/* callbacks (incl. optional ->print) */
	unsigned long		flags;
	unsigned long		ip;	/* address the probe is attached to */
	void			*data;	/* opaque per-probe payload */
	/* rcu: presumably for deferred freeing -- confirm at release site */
	struct rcu_head		rcu;
};


263
264
265
266
267
268
/* Command bits handed to __ftrace_modify_code() via ftrace_run_update_code(). */
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

273
274
/* Nonzero when set_ftrace_filter has selected a subset of functions. */
static int ftrace_filtered;

/*
 * Singly linked list of records awaiting conversion in
 * ftrace_update_code(); the link is threaded through dyn_ftrace.flags
 * (see ftrace_record_ip()).
 */
static struct dyn_ftrace *ftrace_new_addrs;

/* NOTE(review): uses not visible in this chunk; presumably guards the
 * set_ftrace_filter/set_ftrace_notrace parsing state. */
static DEFINE_MUTEX(ftrace_regex_lock);

/* A page worth of dyn_ftrace records; pages form a singly linked list. */
struct ftrace_page {
	struct ftrace_page	*next;
	int			index;		/* first unused slot in records[] */
	struct dyn_ftrace	records[];	/* flexible array filling the page */
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;	/* head of the page list */
static struct ftrace_page	*ftrace_pages;		/* page currently being filled */

/* Free list of recycled records, threaded through dyn_ftrace.ip. */
static struct dyn_ftrace *ftrace_free_records;

296
297
298
299
300
301
302
303
304
305
306
307
308
/*
 * Iterate over every dyn_ftrace record on every page.
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
309
310

#ifdef CONFIG_KPROBES
311
312
313

static int frozen_record_count;

314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

Ingo Molnar's avatar
Ingo Molnar committed
340
/*
 * Return @rec to the free list.  The record's ip field is reused as
 * the free-list link (ftrace_alloc_dyn_node() reads it back), and
 * FTRACE_FL_FREE lets record walkers skip it.
 */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

347
348
349
350
351
352
353
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;

354
	if (ftrace_disabled || !start)
355
356
		return;

357
	mutex_lock(&ftrace_lock);
358
	do_for_each_ftrace_rec(pg, rec) {
359
360
		if ((rec->ip >= s) && (rec->ip < e) &&
		    !(rec->flags & FTRACE_FL_FREE))
361
362
			ftrace_free_rec(rec);
	} while_for_each_ftrace_rec();
363
	mutex_unlock(&ftrace_lock);
364
365
}

Ingo Molnar's avatar
Ingo Molnar committed
366
/*
 * Allocate a dyn_ftrace record.
 *
 * Reuses a record from the free list when possible, otherwise hands out
 * the next slot of the current ftrace_page, allocating a fresh page when
 * the current one is full.  Returns NULL on allocation failure or when
 * the free list is corrupted.
 *
 * @ip is currently unused here; the caller fills in rec->ip itself
 * (see ftrace_record_ip()).
 */
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			/* Corrupted free list: warn (kills ftrace) and drop it. */
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		/* ip doubles as the free-list link (see ftrace_free_rec()). */
		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

Steven Rostedt's avatar
Steven Rostedt committed
399
static struct dyn_ftrace *
400
ftrace_record_ip(unsigned long ip)
401
{
Steven Rostedt's avatar
Steven Rostedt committed
402
	struct dyn_ftrace *rec;
403

404
	if (ftrace_disabled)
Steven Rostedt's avatar
Steven Rostedt committed
405
		return NULL;
406

Steven Rostedt's avatar
Steven Rostedt committed
407
408
409
	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;
410

Steven Rostedt's avatar
Steven Rostedt committed
411
	rec->ip = ip;
412
413
	rec->flags = (unsigned long)ftrace_new_addrs;
	ftrace_new_addrs = rec;
414

Steven Rostedt's avatar
Steven Rostedt committed
415
	return rec;
416
417
}

418
419
420
421
422
423
424
425
426
427
/* Dump the MCOUNT_INSN_SIZE bytes at @p as colon-separated hex after @fmt. */
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++) {
		const char *sep = (i == 0) ? "" : ":";

		printk(KERN_CONT "%s%02x", sep, p[i]);
	}
}

428
/*
 * ftrace_bug - report a failure patching the call site at @ip
 * @failed: errno returned by the arch patching routine
 * @ip:     address of the mcount call site that failed
 *
 * Every branch trips FTRACE_WARN_ON_ONCE, which shuts ftrace down.
 * Per the messages below: -EFAULT means reading the site faulted,
 * -EINVAL means the bytes found were not the expected instruction
 * (the actual bytes are dumped), -EPERM means the write faulted.
 */
static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

455

456
static int
457
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
458
{
459
	unsigned long ftrace_addr;
Steven Rostedt's avatar
Steven Rostedt committed
460
	unsigned long ip, fl;
461

462
	ftrace_addr = (unsigned long)FTRACE_ADDR;
463
464
465

	ip = rec->ip;

Steven Rostedt's avatar
Steven Rostedt committed
466
467
468
469
470
	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
471
	 * it is enabled then disable it.
Steven Rostedt's avatar
Steven Rostedt committed
472
473
474
475
476
477
478
479
480
	 *
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
481
		/*
Steven Rostedt's avatar
Steven Rostedt committed
482
		 * Filtering is on:
483
		 */
484

Steven Rostedt's avatar
Steven Rostedt committed
485
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
486

Steven Rostedt's avatar
Steven Rostedt committed
487
488
		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
489
			return 0;
490

491
		/* Record is not filtered or enabled, do nothing */
Steven Rostedt's avatar
Steven Rostedt committed
492
493
494
495
496
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
497
			rec->flags &= ~FTRACE_FL_ENABLED;
Steven Rostedt's avatar
Steven Rostedt committed
498
499
		else
		/* Otherwise record is filtered but not enabled, enable it */
500
501
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
Steven Rostedt's avatar
Steven Rostedt committed
502
		/* Disable or not filtered */
503

504
		if (enable) {
Steven Rostedt's avatar
Steven Rostedt committed
505
			/* if record is enabled, do nothing */
506
			if (rec->flags & FTRACE_FL_ENABLED)
507
				return 0;
Steven Rostedt's avatar
Steven Rostedt committed
508

509
			rec->flags |= FTRACE_FL_ENABLED;
Steven Rostedt's avatar
Steven Rostedt committed
510

511
		} else {
Steven Rostedt's avatar
Steven Rostedt committed
512

513
			/* if record is not enabled, do nothing */
514
			if (!(rec->flags & FTRACE_FL_ENABLED))
515
				return 0;
Steven Rostedt's avatar
Steven Rostedt committed
516

517
518
519
520
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

Steven Rostedt's avatar
Steven Rostedt committed
521
	if (rec->flags & FTRACE_FL_ENABLED)
522
		return ftrace_make_call(rec, ftrace_addr);
523
	else
524
		return ftrace_make_nop(NULL, rec, ftrace_addr);
525
526
}

Ingo Molnar's avatar
Ingo Molnar committed
527
/*
 * ftrace_replace_code - walk every record and (de)activate its call site
 * @enable: nonzero to turn call sites on, zero to turn them off
 *
 * Runs with the machine stopped (invoked from __ftrace_modify_code()
 * under stop_machine()) since it rewrites kernel text.  A failed patch
 * of boot/module text frees the record; a failure in core kernel text
 * is reported via ftrace_bug() and aborts the walk.
 */
static void ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records, records that have
		 * failed and not converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			if ((system_state == SYSTEM_BOOTING) ||
			    !core_kernel_text(rec->ip)) {
				ftrace_free_rec(rec);
			} else {
				ftrace_bug(failed, rec->ip);
				/* Stop processing */
				return;
			}
		}
	} while_for_each_ftrace_rec();
}

566
static int
567
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
568
569
{
	unsigned long ip;
570
	int ret;
571
572
573

	ip = rec->ip;

574
	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
575
	if (ret) {
576
		ftrace_bug(ret, ip);
577
		rec->flags |= FTRACE_FL_FAILED;
578
		return 0;
579
	}
580
	return 1;
581
582
}

583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	/* default: nothing to prepare */
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	/* default: nothing to clean up */
	return 0;
}

Ingo Molnar's avatar
Ingo Molnar committed
601
static int __ftrace_modify_code(void *data)
602
{
603
604
	int *command = data;

605
	if (*command & FTRACE_ENABLE_CALLS)
606
		ftrace_replace_code(1);
607
	else if (*command & FTRACE_DISABLE_CALLS)
608
609
610
611
612
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

613
614
615
616
617
	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

618
	return 0;
619
620
}

Ingo Molnar's avatar
Ingo Molnar committed
621
/*
 * ftrace_run_update_code - apply @command to the kernel's call sites
 *
 * Brackets the modification with the arch prepare/post hooks and runs
 * the actual patching under stop_machine() so no CPU executes the text
 * being rewritten.  If the prepare hook fails, FTRACE_WARN_ON kills
 * ftrace and we bail without patching.
 */
static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

636
/* Trace function installed by the last update; used to detect changes. */
static ftrace_func_t saved_ftrace_func;
/* Nested start count: call sites stay patched while this is nonzero. */
static int ftrace_start_up;
638
639
640
641
642
643
644
645
646
647
648
649
650

/*
 * Run a code update for @command, additionally refreshing the installed
 * trace function if it changed since the last update.  Skipped entirely
 * when there is nothing to do or ftrace is globally off.
 */
static void ftrace_startup_enable(int command)
{
	if (ftrace_trace_function != saved_ftrace_func) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (command && ftrace_enabled)
		ftrace_run_update_code(command);
}
651

652
static void ftrace_startup(int command)
653
{
654
655
656
	if (unlikely(ftrace_disabled))
		return;

657
	ftrace_start_up++;
Steven Rostedt's avatar
Steven Rostedt committed
658
	command |= FTRACE_ENABLE_CALLS;
659

660
	ftrace_startup_enable(command);
661
662
}

663
/*
 * ftrace_shutdown - drop one user of function tracing
 *
 * Decrements the start count; when it reaches zero the call sites are
 * patched back to NOPs.  Also refreshes the trace function if it
 * changed since the last update.
 *
 * NOTE(review): nothing guards against an unbalanced call driving
 * ftrace_start_up negative -- callers must pair with ftrace_startup().
 */
static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

Ingo Molnar's avatar
Ingo Molnar committed
683
static void ftrace_startup_sysctl(void)
684
{
685
686
	int command = FTRACE_ENABLE_MCOUNT;

687
688
689
	if (unlikely(ftrace_disabled))
		return;

690
691
	/* Force update next time */
	saved_ftrace_func = NULL;
692
693
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
694
695
696
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
697
698
}

Ingo Molnar's avatar
Ingo Molnar committed
699
static void ftrace_shutdown_sysctl(void)
700
{
701
702
	int command = FTRACE_DISABLE_MCOUNT;

703
704
705
	if (unlikely(ftrace_disabled))
		return;

706
707
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
708
709
710
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
711
712
}

713
714
715
716
/* Time spent and records converted in the last ftrace_update_code() run. */
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
/* Total records converted since boot. */
unsigned long		ftrace_update_tot_cnt;

717
/*
 * ftrace_update_code - convert all newly discovered mcount call sites
 * @mod: module the sites belong to, or NULL for core kernel
 *
 * Drains the ftrace_new_addrs list, NOPing out each call site via
 * ftrace_code_disable().  Successfully converted records are marked
 * CONVERTED; failures are freed.  Updates the ftrace_update_* stats.
 * Returns 0, or -1 if ftrace got disabled mid-walk.
 */
static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		/* flags holds the list link (see ftrace_record_ip()). */
		p = ftrace_new_addrs;
		ftrace_new_addrs = (struct dyn_ftrace *)p->flags;
		p->flags = 0L;

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

750
/*
 * ftrace_dyn_table_alloc - boot-time allocation of the record pages
 * @num_to_init: expected number of mcount call sites
 *
 * Allocates the first ftrace_page plus enough extra pages for
 * @num_to_init records.  Failing to allocate the extras is tolerated
 * (pages are also allocated lazily in ftrace_alloc_dyn_node()); only
 * failure of the very first page returns -1.
 */
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

794
795
796
/* State flags for the seq_file iterators over the function lists. */
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
	FTRACE_ITER_PRINTALL	= (1 << 4),
	FTRACE_ITER_HASH	= (1 << 5),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

/* Per-open iterator state for the t_*() seq_file callbacks below. */
struct ftrace_iterator {
	struct ftrace_page	*pg;	/* current record page */
	int			hidx;	/* current ftrace_func_hash bucket */
	int			idx;	/* index into pg->records */
	unsigned		flags;	/* FTRACE_ITER_* */
	/* NOTE(review): buffer/buffer_idx/filtered belong to the write
	 * (filter-parsing) path, which is outside this chunk. */
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
/*
 * Advance the probe-hash iterator: return the next node after @v,
 * walking bucket by bucket through ftrace_func_hash.  Returns NULL
 * when every bucket is exhausted.
 */
static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *node = v;

	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

	(*pos)++;

	while (iter->hidx < FTRACE_FUNC_HASHSIZE) {
		struct hlist_head *head = &ftrace_func_hash[iter->hidx];

		if (hlist_empty(head)) {
			iter->hidx++;
			node = NULL;
			continue;
		}

		if (!node)
			return head->first;

		node = node->next;
		if (node)
			return node;

		/* Bucket exhausted: start fresh in the next one. */
		iter->hidx++;
	}

	return NULL;
}

/* Switch the iterator into probe-hash mode and return its first node. */
static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;

	iter->flags |= FTRACE_ITER_HASH;

	return t_hash_next(m, NULL, pos);
}

static int t_hash_show(struct seq_file *m, void *v)
{
863
	struct ftrace_func_probe *rec;
864
865
866
	struct hlist_node *hnd = v;
	char str[KSYM_SYMBOL_LEN];

867
	rec = hlist_entry(hnd, struct ftrace_func_probe, node);
868

869
870
871
	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

872
873
874
875
876
877
878
879
880
881
882
883
884
	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s:", str);

	kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
	seq_printf(m, "%s", str);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

Ingo Molnar's avatar
Ingo Molnar committed
885
/*
 * seq_file ->next: return the next dyn_ftrace record matching the
 * iterator's view (filter/notrace/failures), crossing record pages as
 * needed.  Delegates to t_hash_next() once in probe-hash mode.
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, v, pos);

	(*pos)++;

	/* The "all functions enabled" banner is a single line. */
	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		/*
		 * Skip records that do not belong in this view: freed
		 * records; failed records unless the FAILURES view is
		 * selected (and non-failed ones when it is); records
		 * outside the filter/notrace sets in those views.
		 */
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	return rec;
}

/*
 * seq_file ->start: begin a read session.  Takes ftrace_lock, which is
 * released in t_stop().  When plain records run out, iteration falls
 * through into the probe hash via t_hash_start().
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	mutex_lock(&ftrace_lock);
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		(*pos)++;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/*
	 * Rewind one step so t_next() re-returns the record the
	 * previous session stopped at.
	 */
	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	/* Out of plain records: continue into the probe hash. */
	if (!p)
		return t_hash_start(m, pos);

	return p;
}

/* seq_file ->stop: drop the lock taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

static int t_show(struct seq_file *m, void *v)
{
975
	struct ftrace_iterator *iter = m->private;
976
977
978
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

979
980
981
	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, v);

982
983
984
985
986
	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

987
988
989
990
991
	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

992
	seq_printf(m, "%s\n", str);
993
994
995
996
997
998
999
1000

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,