trace_functions_graph.c 10.9 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

16
/* Number of space columns each nesting level of the call graph is indented */
#define TRACE_GRAPH_INDENT	2

/* Flag options (bit values OR-ed into tracer_flags.val) */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1	/* append ring-buffer overrun count */
#define TRACE_GRAPH_PRINT_CPU		0x2	/* show the CPU column */
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4	/* show '!'/'+' duration markers */
#define TRACE_GRAPH_PRINT_PROC		0x8	/* show the comm-pid column */
23

24
/* User-togglable output options, exposed through the trace_options file */
static struct tracer_opt trace_opts[] = {
	/* Display overruns ? */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	{ } /* Empty entry */
};

/* Default option state: CPU and overhead columns on, overrun and proc off */
static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
	.opts = trace_opts
};

42
/* pid on the last trace processed, per cpu; -1 means "no trace seen yet" */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
44
45
46

static int graph_trace_init(struct trace_array *tr)
{
47
48
	int cpu, ret;

49
50
51
	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

52
	ret = register_ftrace_graph(&trace_graph_return,
53
					&trace_graph_entry);
54
55
56
57
58
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
59
60
61
62
}

/* Detach the graph tracer: stop cmdline recording and remove the hooks */
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
/*
 * Return the number of decimal digits needed to print @nb.
 * Used to right-align the CPU column against the widest online
 * CPU number.  Generalized from the original hard 3-digit cap so
 * that systems with 1000 or more CPUs still align correctly;
 * results for nb <= 999 are unchanged.
 */
static inline int log10_cpu(int nb)
{
	int digits = 1;

	while (nb >= 10) {
		nb /= 10;
		digits++;
	}
	return digits;
}

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int i;
	int ret;
	int log10_this = log10_cpu(cpu);
	int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));


85
86
87
88
89
90
91
92
93
94
95
96
97
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " ");

	/*
	 * Tricky - we space the CPU field according to the max
	 * number of online CPUs. On a 2-cpu system it would take
	 * a maximum of 1 digit - on a 128 cpu system it would
	 * take up to 3 digits:
	 */
98
99
100
101
102
103
104
	for (i = 0; i < log10_all - log10_this; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	ret = trace_seq_printf(s, "%d) ", cpu);
	if (!ret)
105
106
		return TRACE_TYPE_PARTIAL_LINE;

107
108
109
	return TRACE_TYPE_HANDLED;
}

110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
/* Total width of the centered "comm-pid" field */
#define TRACE_GRAPH_PROCINFO_LENGTH	14

/*
 * Print "comm-pid" centered in a TRACE_GRAPH_PROCINFO_LENGTH-wide
 * field, padding with spaces on both sides.
 */
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[8];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int field_len;
	int pad = 0;
	int lead;
	int n;
	int ok;

	strncpy(comm, trace_find_cmdline(pid), 7);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	field_len = strlen(comm) + strlen(pid_str) + 1;
	if (field_len < TRACE_GRAPH_PROCINFO_LENGTH)
		pad = TRACE_GRAPH_PROCINFO_LENGTH - field_len;
	lead = pad / 2;

	/* Leading spaces to center the field */
	for (n = 0; n < lead; n++) {
		ok = trace_seq_printf(s, " ");
		if (!ok)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ok = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ok)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Trailing spaces take whatever padding is left */
	for (n = 0; n < pad - lead; n++) {
		ok = trace_seq_printf(s, " ");
		if (!ok)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

153

154
/* If the pid changed since the last trace, output this event */
155
156
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu)
157
{
158
159
	pid_t prev_pid;
	int ret;
160

161
	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
162
		return TRACE_TYPE_HANDLED;
163

164
	prev_pid = last_pid[cpu];
165
	last_pid[cpu] = pid;
166
167
168
169
170
171
172
173
174
175

/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_printf(s,
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
		"\n ------------------------------------------\n |");
	if (!ret)
		TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		TRACE_TYPE_PARTIAL_LINE;

201
	return ret;
202
203
}

204
205
206
207
208
209
210
211
212
213
214
215
216
static bool
trace_branch_is_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct ring_buffer_iter *ring_iter;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	ring_iter = iter->buffer_iter[iter->cpu];

	if (!ring_iter)
		return false;

217
	event = ring_buffer_iter_peek(ring_iter, NULL);
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238

	if (!event)
		return false;

	next = ring_buffer_event_data(event);

	if (next->ent.type != TRACE_GRAPH_RET)
		return false;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return false;

	return true;
}


/*
 * Print a nanosecond duration as "xxxx.yyy us |  ".
 * do_div() divides @duration by 1000 in place (-> microseconds) and
 * returns the remainder (the sub-microsecond nanoseconds).
 */
static inline int
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	return trace_seq_printf(s, "%4llu.%3lu us |  ", duration, nsecs_rem);
}

/*
 * Signal a overhead of time execution to the output.
 * @duration is in nanoseconds (see print_graph_duration(), which
 * divides by 1000 to print microseconds), so the thresholds below
 * are 100 us and 10 us - the original "msecs" comments were off
 * by a factor of 1000.
 */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* Duration exceeded 100 usecs (100000 ns) */
	if (duration > 100000ULL)
		return trace_seq_printf(s, "! ");

	/* Duration exceeded 10 usecs (10000 ns) */
	if (duration > 10000ULL)
		return trace_seq_printf(s, "+ ");

	return trace_seq_printf(s, "  ");
}

/* Case of a leaf function on its call entry */
258
static enum print_line_t
259
260
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
261
{
262
263
264
265
266
	struct ftrace_graph_ret_entry *ret_entry;
	struct ftrace_graph_ret *graph_ret;
	struct ring_buffer_event *event;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
267
	int ret;
268
	int i;
269

270
271
272
273
274
275
	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
	ret_entry = ring_buffer_event_data(event);
	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

276
277
278
279
	/* Must not exceed 8 characters: 9999.999 us */
	if (duration > 10000000ULL)
		duration = 9999999ULL;

280
	/* Overhead */
281
282
283
284
285
286
287
288
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
289
	if (!ret)
290
291
		return TRACE_TYPE_PARTIAL_LINE;

292
293
294
295
296
297
298
299
300
301
302
	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

303
	ret = trace_seq_printf(s, "();\n");
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
			struct trace_seq *s)
{
	int i;
	int ret;
	struct ftrace_graph_ent *call = &entry->graph_ent;

	/* No overhead */
319
320
321
322
323
324
325
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = trace_seq_printf(s, "  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No time */
326
	ret = trace_seq_printf(s, "            |  ");
327
328

	/* Function */
329
330
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
331
332
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
333
334
335
336
337
338
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

339
	ret = trace_seq_printf(s, "() {\n");
340
341
342
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

343
344
345
	return TRACE_TYPE_HANDLED;
}

346
347
348
349
350
351
352
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, int cpu)
{
	int ret;
	struct trace_entry *ent = iter->ent;

353
	/* Pid */
354
	if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
355
356
		return TRACE_TYPE_PARTIAL_LINE;

357
358
359
	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
360
361
362
363
364
365
366
367
368
369
370
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
371
372
373
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
374
375
376
377
378
379
380
381

	if (trace_branch_is_leaf(iter, field))
		return print_graph_entry_leaf(iter, field, s);
	else
		return print_graph_entry_nested(field, s);

}

382
383
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
384
		   struct trace_entry *ent, int cpu)
385
386
387
{
	int i;
	int ret;
388
	unsigned long long duration = trace->rettime - trace->calltime;
389

390
391
392
393
	/* Must not exceed 8 characters: xxxx.yyy us */
	if (duration > 10000000ULL)
		duration = 9999999ULL;

394
	/* Pid */
395
	if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
396
397
		return TRACE_TYPE_PARTIAL_LINE;

398
	/* Cpu */
399
400
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
401
402
403
404
405
406
407
408
409
410
411
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
412
413
414
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
415

416
	/* Overhead */
417
418
419
420
421
422
423
424
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
425
426
427
428
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
429
430
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
431
432
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
433
434
	}

435
	ret = trace_seq_printf(s, "}\n");
436
437
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
438

439
	/* Overrun */
440
441
442
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
443
444
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
445
446
447
448
449
450
451
452
453
	}
	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
454

455
456
457
458
	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		struct ftrace_graph_ent_entry *field;
		trace_assign_type(field, entry);
459
		return print_graph_entry(field, s, iter,
460
					 iter->cpu);
461
462
463
464
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
465
		return print_graph_return(&field->ret, s, entry, iter->cpu);
466
467
468
	}
	default:
		return TRACE_TYPE_UNHANDLED;
469
470
471
472
	}
}

/* Tracer descriptor registered with the tracing core */
static struct tracer graph_trace __read_mostly = {
	.name	     = "function_graph",
	.init	     = graph_trace_init,
	.reset	     = graph_trace_reset,
	.print_line = print_graph_function,
	.flags		= &tracer_flags,
};

/* Register the function_graph tracer at device-initcall time */
static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);