trace_functions_graph.c 20.8 KB
Newer Older
1
2
3
/*
 *
 * Function graph tracer.
4
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5
6
7
8
9
10
11
12
13
14
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
15
#include "trace_output.h"
16

17
18
19
20
21
/*
 * Per-cpu output state for the graph iterator:
 * @last_pid: pid of the last trace entry printed on this cpu, used by
 *            verif_pid() to detect context switches (-1 = none yet)
 * @depth:    current call-graph nesting depth, used to indent comment
 *            lines emitted by print_graph_comment()
 */
struct fgraph_data {
	pid_t		last_pid;
	int		depth;
};

22
/* Number of spaces added per call-graph nesting level */
#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
/* was written "0X20"; lowercase for consistency with the other flags */
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
31

32
/* User-togglable options; defaults are set in tracer_flags below */
static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION,
	.opts = trace_opts
};

55
/* pid on the last trace processed */
56

57

58
59
/*
 * Add a function return address to the trace stack on thread info.
 *
 * @ret:   original return address, to be restored when the function exits
 * @func:  address of the traced function being entered
 * @depth: out parameter, set to the new stack index (i.e. call depth)
 *
 * Returns 0 on success, or -EBUSY when the task has no ret_stack or the
 * stack is full (the overflow is accounted in current->trace_overrun).
 */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	/*
	 * Compiler barrier: don't let the slot writes below be reordered
	 * before the curr_ret_stack update above.
	 */
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	*depth = index;

	return 0;
}

/*
 * Retrieve a function return address to the trace stack on thread info.
 * Fills @trace from the top ret_stack slot, stores the original return
 * address in *@ret, then pops the stack.
 */
void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
	int index;

	index = current->curr_ret_stack;

	/* Underflow: the stack is corrupted - stop tracing and bail out */
	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have no where to go */
		*ret = (unsigned long)panic;
		return;
	}

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
	/*
	 * Compiler barrier: finish reading the slot before the pop below
	 * makes it reusable.
	 */
	barrier();
	current->curr_ret_stack--;

}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 *
 * Called on function exit: pops the top ret_stack entry, stamps the
 * return time and hands the completed record to ftrace_graph_return().
 */
unsigned long ftrace_return_to_handler(void)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);

	/* A zero return address means the stack was corrupted */
	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

141
142
static int graph_trace_init(struct trace_array *tr)
{
143
	int ret = register_ftrace_graph(&trace_graph_return,
144
					&trace_graph_entry);
145
146
147
148
149
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
150
151
152
153
}

/* Tracer teardown: undo graph_trace_init() in reverse order */
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
/*
 * Width in digits of a cpu number (1 to 3); the field is only sized
 * for up to three digits.
 */
static inline int log10_cpu(int nb)
{
	int width = 1;

	if (nb / 10)
		width++;
	if (nb / 100)
		width++;
	return width;
}

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int i;
	int ret;
	int log10_this = log10_cpu(cpu);
173
	int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
174
175


176
177
178
179
180
181
182
183
184
185
186
187
188
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " ");

	/*
	 * Tricky - we space the CPU field according to the max
	 * number of online CPUs. On a 2-cpu system it would take
	 * a maximum of 1 digit - on a 128 cpu system it would
	 * take up to 3 digits:
	 */
189
190
191
192
193
194
195
	for (i = 0; i < log10_all - log10_this; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	ret = trace_seq_printf(s, "%d) ", cpu);
	if (!ret)
196
197
		return TRACE_TYPE_PARTIAL_LINE;

198
199
200
	return TRACE_TYPE_HANDLED;
}

201
202
203
204
205
/* Total width of the "comm-pid" column */
#define TRACE_GRAPH_PROCINFO_LENGTH	14

/*
 * Print "comm-pid" centered within a TRACE_GRAPH_PROCINFO_LENGTH wide
 * field.  The command name is truncated to 7 characters so the pair
 * fits the column.
 */
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	/* truncate comm so "comm-pid" stays within the column width */
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

244

245
/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	/* No per-cpu state (allocation failed in graph_trace_open()) */
	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);

	/* Same task as the previous entry on this cpu: nothing to print */
	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	/* -1 means no trace has been processed on this cpu yet */
	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

303
304
/*
 * Look at the event following @curr to decide whether @curr is a leaf
 * call: an entry immediately followed by its own return (same pid,
 * same function).  Returns the matching return entry - advancing the
 * iterator past it - or NULL if @curr is not a leaf.
 */
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct ring_buffer_iter *ring_iter;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	ring_iter = iter->buffer_iter[iter->cpu];

	/* First peek to compare current entry and the next one */
	if (ring_iter)
		event = ring_buffer_iter_peek(ring_iter, NULL);
	else {
	/* We need to consume the current entry to see the next one */
		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
					NULL);
	}

	if (!event)
		return NULL;

	next = ring_buffer_event_data(event);

	/* Next event is not a return: @curr opens a nested scope */
	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	/* The return must belong to the same task and function */
	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
/*
 * Signal a overhead of time execution to the output: "! " or "+ "
 * before the duration column when the call was slow.
 */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* If duration disappear, we don't need anything */
	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
		return 1;

	/*
	 * Non nested entry or return: callers pass -1 (wraps to
	 * ULLONG_MAX) when there is no measured duration.
	 */
	if (duration == -1)
		return trace_seq_printf(s, "  ");

	/*
	 * NOTE(review): duration comes from rettime - calltime and
	 * print_graph_duration() treats it as nanoseconds, so these
	 * thresholds look like 100/10 usecs, not msecs as the original
	 * comments said - confirm the clock units.
	 */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 msecs */
		if (duration > 100000ULL)
			return trace_seq_printf(s, "! ");

		/* Duration exceeded 10 msecs */
		if (duration > 10000ULL)
			return trace_seq_printf(s, "+ ");
	}

	return trace_seq_printf(s, "  ");
}

367
368
369
370
371
372
373
374
375
376
377
/* Print the absolute timestamp @t (nanoseconds) as "sssss.uuuuuu |  " */
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	/* split into whole seconds (left in t) and the ns remainder */
	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

378
/*
 * Print a "==========>" / "<==========" marker line when @addr falls in
 * the irqentry text section, i.e. when entering or leaving an interrupt
 * handler.  Returns TRACE_TYPE_UNHANDLED for non-irq addresses.
 */
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	/* Not an interrupt entry point: nothing to print */
	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* arrow direction distinguishes irq entry from irq exit */
	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Don't close the duration column if haven't one */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		trace_seq_printf(s, " |");
	ret = trace_seq_printf(s, "\n");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
434

435
/*
 * Print @duration (nanoseconds) as "<usecs>.<frac> us", padded to the
 * fixed 7-character width of the DURATION column and closed with "|  ".
 */
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	/* do_div() leaves usecs in duration and returns the ns remainder */
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		/* 8 - len caps the fractional digits to the column width */
		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;

}

/*
 * Case of a leaf function on its call entry: the entry and its return
 * were adjacent in the buffer, so print the whole call as one
 * "func();" line with its measured duration.
 */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		*depth = call->depth - 1;
	}

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* leaf call: single line, no opening brace */
	ret = trace_seq_printf(s, "();\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/*
 * Case of a non-leaf call entry: print "func() {" and record the new
 * nesting depth.  No duration is printed - it is only known when the
 * matching return event arrives.
 */
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		/* NOTE(review): this local shadows the cpu parameter */
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		*depth = call->depth;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "() {\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

591
/*
 * Print the fields shared by every graph line: the context-switch
 * banner (when the pid changed), an optional irq marker, then the
 * abstime/cpu/proc columns selected in tracer_flags.
 * Returns 0 on success, TRACE_TYPE_PARTIAL_LINE when the seq is full.
 */
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Print a function entry event.  A leaf (entry followed directly by
 * its own return) is collapsed to one "func();" line; anything else
 * opens a nested "func() {" scope.
 */
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter)
{
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	int cpu = iter->cpu;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		return print_graph_entry_leaf(iter, field, leaf_ret, s);

	return print_graph_entry_nested(iter, field, s, cpu);
}

658
659
/*
 * Print a function return event: the closing "}" with the measured
 * duration, optional overrun count, and an irq-exit marker if needed.
 */
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int ret;
	int i;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		*depth = trace->depth - 1;
	}

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "}\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overrun */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* irq-exit marker, if this return leaves an interrupt handler */
	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

722
/*
 * Print an event that is not a graph entry/return (bprintk, printk or
 * any other tracer's event) as a block comment, indented one level
 * below the current call depth.
 */
static enum print_line_t
print_graph_comment(struct trace_seq *s,  struct trace_entry *ent,
		    struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* delegate the message body to the event's own formatter */
	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->trace(iter, sym_flags);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/*
	 * Strip ending newline (s->len > 0 here: the comment opener
	 * was already written to the seq above)
	 */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}


799
800
801
802
/*
 * Main print_line callback: dispatch a ring-buffer entry to the
 * entry/return printers, or render anything else as a comment line.
 */
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		struct ftrace_graph_ent_entry *field;
		trace_assign_type(field, entry);
		return print_graph_entry(field, s, iter);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter);
	}
	default:
		return print_graph_comment(s, entry, iter);
	}

	/* not reached: every case above returns */
	return TRACE_TYPE_HANDLED;
}

823
824
825
826
/*
 * Emit the two-line column header; each optional column appears only
 * when its flag is set, mirroring the per-line printers above.
 */
static void print_graph_headers(struct seq_file *s)
{
	/* 1st line */
	seq_printf(s, "# ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, "CPU");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID      ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "# ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, "|  ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  |    |        ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}
849
850
851

/* Iterator open callback: allocate and reset the per-cpu pid/depth state */
static void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
	int cpu;

	if (!data)
		pr_warning("function graph tracer: not enough memory\n");
	else
		for_each_possible_cpu(cpu) {
			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
			int *depth = &(per_cpu_ptr(data, cpu)->depth);
			/* -1: no trace processed on this cpu yet */
			*pid = -1;
			*depth = 0;
		}

	/* may be NULL on allocation failure; the printers check for that */
	iter->private = data;
}

/* Iterator close callback: release the state from graph_trace_open() */
static void graph_trace_close(struct trace_iterator *iter)
{
	/* iter->private may be NULL if the open-time allocation failed */
	free_percpu(iter->private);
}

874
/* The "function_graph" tracer, registered at boot by init_graph_trace() */
static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.close		= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

/* Register the function_graph tracer with the tracing core at boot */
static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);