/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct ftrace_ops trace_ops;
static struct ftrace_ops trace_stack_ops;
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

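/*
 * Each trace_array instance gets its own dynamically allocated
 * ftrace_ops, so per-instance function tracing can be registered and
 * filtered independently of the global tracer.
 */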
static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}

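/*
 * Called when a trace instance directory is set up: create the
 * set_ftrace_filter/set_ftrace_notrace files for this trace_array.
 * The top level array keeps using the static global ops instead of an
 * allocated one.
 */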
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/* The top level array uses the "global_ops". */
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) {
		ret = allocate_ftrace_ops(tr);
		if (ret)
			return ret;
	}

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		/* There's only one global tr */
		if (!trace_ops.private) {
			trace_ops.private = tr;
			trace_stack_ops.private = tr;
		}

		if (func_flags.val & TRACE_FUNC_OPT_STACK)
			ops = &trace_stack_ops;
		else
			ops = &trace_ops;
		tr->ops = ops;
	} else if (!tr->ops) {
		/*
		 * Instance trace_arrays get their ops allocated at
		 * instance creation, unless that allocation failed.
		 */
		return -ENOMEM;
	}

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

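/*
 * Callback invoked for every traced function: record one function
 * entry event in the ring buffer. The recursion bit guards against a
 * traced function being re-entered from within the tracer itself.
 */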
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

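/*
 * Variant used when the func_stack_trace option is set: record the
 * function entry followed by a stack trace. Interrupts are disabled
 * and a per-cpu "disabled" counter provides the recursion protection
 * here.
 */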
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call,
		 *    ftrace_list_func,
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

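/*
 * With the function tracer active, the stack trace option can be
 * toggled from user space, e.g.:
 *
 *   echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 */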
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

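/*
 * Toggling func_stack_trace swaps the registered ops between
 * trace_ops and trace_stack_ops. Note that the old ops is
 * unregistered before the new one is registered, so a few functions
 * may go untraced during the switch.
 */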
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops = &trace_stack_ops;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops = &trace_ops;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

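/*
 * The tracer itself, selected from user space with, e.g.:
 *
 *   echo function > /sys/kernel/debug/tracing/current_tracer
 */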
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
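/*
 * The probes below implement the set_ftrace_filter commands (traceon,
 * traceoff, stacktrace, dump, cpudump). The void *data pointer itself
 * is used as the remaining-hits counter; -1 means unlimited.
 */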
static int update_count(void **data)
{
	unsigned long *count = (unsigned long *)data;

	if (!*count)
		return 0;

	if (*count != -1)
		(*count)--;

	return 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	if (update_count(data))
		tracing_on();
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		tracing_off();
}

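/* Non-counting variants, used when no :count parameter is given. */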
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		trace_dump_stack(STACK_SKIP);
}

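/* Dump every CPU's ring buffer to the console. */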
static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ORIG);
}

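/*
 * Shared seq_file output for the probes. Reading set_ftrace_filter
 * shows lines of the form "<func>:<cmd>:unlimited" or
 * "<func>:<cmd>:count=<n>".
 */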
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

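/*
 * Common parsing for the filter commands. The user writes, e.g.:
 *
 *   echo 'schedule:traceoff:5' > set_ftrace_filter
 *
 * where the glob names the functions, the optional trailing number is
 * the hit count, and a leading '!' unregisters the probe instead.
 */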
static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

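/*
 * Register all of the above commands, unwinding the ones already
 * registered if any registration fails.
 */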
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);