/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

struct fgraph_data {
	pid_t		last_pid;
	int		depth;
};

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %pF return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

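/*
 * Write a function entry event into the ring buffer of the given
 * trace array.
 */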
static int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);

	return 1;
}

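/*
 * Entry callback registered with the function graph infrastructure.
 * Records the function entry unless tracing is disabled for this task,
 * this function or this CPU.
 */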
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (unlikely(!tr))
		return 0;

	if (!ftrace_trace_task(current))
		return 0;

	if (!ftrace_graph_addr(trace->func))
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}
	/* Only do the atomic if it is not already set */
	if (!test_tsk_trace_graph(current))
		set_tsk_trace_graph(current);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

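/*
 * Write a function return event into the ring buffer of the given
 * trace array.
 */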
static void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);
}

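/*
 * Return callback registered with the function graph infrastructure.
 * Mirrors trace_graph_entry() for function exits.
 */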
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	if (!trace->depth)
		clear_tsk_trace_graph(current);
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

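/* Register the entry/return callbacks and start recording cmdlines. */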
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	graph_array = tr;
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

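/* Point the graph entry/return callbacks at a different trace array. */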
void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;
}

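/* Stop cmdline recording and unregister the graph callbacks. */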
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

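/* Print the CPU column, padded to the width of the largest CPU id. */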
static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

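/* Print the "comm-pid" column, centered in TRACE_GRAPH_PROCINFO_LENGTH characters. */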
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}


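/* Print the latency format fields: irqs-off, need-resched, hardirq/softirq and preempt count. */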
static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	int hardirq, softirq;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	if (!trace_seq_printf(s, " %c%c%c",
			      (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
				(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
				  'X' : '.',
			      (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
				'N' : '.',
			      (hardirq && softirq) ? 'H' :
				hardirq ? 'h' : softirq ? 's' : '.'))
		return 0;

	if (entry->preempt_count)
		return trace_seq_printf(s, "%x", entry->preempt_count);
	return trace_seq_puts(s, ".");
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

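/*
 * Peek at the next event: if it is the return of the same function for the
 * same pid, the current entry is a leaf and its matching return entry is
 * handed back so both can be printed on a single line.
 */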
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct ring_buffer_iter *ring_iter;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	ring_iter = iter->buffer_iter[iter->cpu];

	/* First peek to compare current entry and the next one */
	if (ring_iter)
		event = ring_buffer_iter_peek(ring_iter, NULL);
	else {
	/* We need to consume the current entry to see the next one */
		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
					NULL);
	}

	if (!event)
		return NULL;

	next = ring_buffer_event_data(event);

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

/* Signal an execution time overhead to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* If the duration column is disabled, we don't need anything */
	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
		return 1;

	/* Non nested entry or return */
	if (duration == -1)
		return trace_seq_printf(s, "  ");

	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 msecs */
		if (duration > 100000ULL)
			return trace_seq_printf(s, "! ");

		/* Duration exceeded 10 msecs */
		if (duration > 10000ULL)
			return trace_seq_printf(s, "+ ");
	}

	return trace_seq_printf(s, "  ");
}

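/* Print the absolute timestamp column as seconds.microseconds. */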
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

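/*
 * Print an "==========>" or "<==========" marker when entering or leaving
 * code located in the irq entry text section.
 */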
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Don't close the duration column if there isn't one */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		trace_seq_printf(s, " |");
	ret = trace_seq_printf(s, "\n");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

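/* Print a duration given in nanoseconds as a microsecond value padded to 7 characters. */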
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

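/* Like trace_print_graph_duration(), but also close the column with "|". */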
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	int ret;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		*depth = call->depth - 1;
	}

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%pf();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

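/* Case of a nested (non-leaf) function on its call entry */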
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		*depth = call->depth;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%pf() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

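/*
 * Print everything that precedes the function column: pid change, irq
 * markers, absolute time, CPU, proc and latency format fields, depending
 * on the enabled options.
 */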
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

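/* Print a function entry, either as a leaf ("func();") or as an opening brace. */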
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter)
{
	int cpu = iter->cpu;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		return print_graph_entry_leaf(iter, field, leaf_ret, s);
	else
		return print_graph_entry_nested(iter, field, s, cpu);

}

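/* Print a function return: closing brace, duration and optional overrun count. */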
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int ret;
	int i;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		*depth = trace->depth - 1;
	}

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "}\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overrun */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

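/* Print any other event type as a C-style comment at the current depth. */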
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->trace(iter, sym_flags);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}


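/* Main print callback: dispatch on the type of the current event. */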
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry *field, saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter);
	}
	default:
		return print_graph_comment(s, entry, iter);
	}

	return TRACE_TYPE_HANDLED;
}

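/* Header lines for the latency format flag columns. */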
static void print_lat_header(struct seq_file *s)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
	seq_printf(s, "#%.*s||||                       \n", size, spaces);
}

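/* Print the column headers matching the enabled options. */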
static void print_graph_headers(struct seq_file *s)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s);

	/* 1st line */
	seq_printf(s, "#");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

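/* Allocate the per-cpu state (last pid and current depth) used while printing. */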
static void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
	int cpu;

	if (!data)
		pr_warning("function graph tracer: not enough memory\n");
	else
		for_each_possible_cpu(cpu) {
			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
			int *depth = &(per_cpu_ptr(data, cpu)->depth);
			*pid = -1;
			*depth = 0;
		}

	iter->private = data;
}

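/* Release the per-cpu state allocated in graph_trace_open(). */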
static void graph_trace_close(struct trace_iterator *iter)
{
	free_percpu(iter->private);
}

static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.close		= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);