/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It defaults to off, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;
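
/*
 * Illustrative run-time usage (shell, assuming the sysctl is available):
 *
 *	# dump every CPU's buffer to the console on an oops
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 *	# dump only the buffer of the CPU that triggered the oops
 *	echo 2 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 * The same can be requested at boot with "ftrace_dump_on_oops" or
 * "ftrace_dump_on_oops=orig_cpu" on the kernel command line.
 */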

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.trace_buffer.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);

	return ts;
}

int tracing_is_enabled(void)
{
	return tracing_is_on();
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
static DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
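
/*
 * Illustrative pairing (not a caller from this file): a reader consuming
 * events from one CPU's buffer brackets the consumption with these
 * primitives, for example:
 *
 *	trace_access_lock(cpu);
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process(event);
 *	trace_access_unlock(cpu);
 */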

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	if (global_trace.trace_buffer.buffer)
		ring_buffer_record_on(global_trace.trace_buffer.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 0;
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
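
/*
 * Note: callers normally reach __trace_puts() through the trace_puts()
 * helper macro added alongside these functions (it picks __trace_bputs()
 * for built-in constant strings), e.g.:
 *
 *	trace_puts("hit the slow path\n");
 */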

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->str			= str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
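
/*
 * Illustrative use from kernel code (hypothetical caller): allocate the
 * snapshot buffer once, then grab a snapshot when a rare condition is
 * seen while normal tracing continues:
 *
 *	tracing_snapshot_alloc();
 *	...
 *	if (unlikely(saw_bad_state))
 *		tracing_snapshot();
 */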

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer, we instead resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	if (WARN_ON(ret < 0))
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	if (global_trace.trace_buffer.buffer)
		ring_buffer_record_off(global_trace.trace_buffer.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 1;
}
EXPORT_SYMBOL_GPL(tracing_off);
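
/*
 * Illustrative pairing (hypothetical caller): freeze the ring buffer when
 * a problem is detected so the events leading up to it are preserved:
 *
 *	if (something_went_wrong())
 *		tracing_off();
 */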

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	if (global_trace.trace_buffer.buffer)
		return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
	return !global_trace.buffer_disabled;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};

int trace_clock_id;
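
/*
 * Illustrative run-time selection of one of the clocks above (shell,
 * assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/tracing/trace_clock
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */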

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by  space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
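
/*
 * Illustrative write-handler flow built on the parser (hypothetical
 * handler, not one defined in this file):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		handle_token(parser.buffer);
 *	trace_parser_put(&parser);
 *	return read;
 */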

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	max_data->uid = task_uid(tsk);
	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}


	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = ftrace_now(buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace.trace_buffer, cpu);
}

void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
	mutex_unlock(&trace_types_lock);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected.  This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;


	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}


	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);