/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will peek into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */
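
/*
 * Example usage (illustrative; the /proc path above, or the boot option
 * parsed by set_ftrace_dump_on_oops() below):
 *
 *	ftrace_dump_on_oops=orig_cpu		(kernel command line)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */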

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.trace_buffer.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);

	return ts;
}

int tracing_is_enabled(void)
{
	return tracing_is_on();
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * not to have to wait for all that output. Anyway this can be
 * configured at boot time and at run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
static DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */
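
/*
 * Typical reader-side pattern for these primitives (an illustrative
 * sketch only, not exercised here):
 *
 *	trace_access_lock(cpu);
 *	... peek at or consume events from the @cpu ring buffer ...
 *	trace_access_unlock(cpu);
 */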

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	if (global_trace.trace_buffer.buffer)
		ring_buffer_record_on(global_trace.trace_buffer.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 0;
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->str			= str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
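
/*
 * Note: the two helpers above back the trace_puts() convenience macro;
 * illustrative use (assuming a constant string literal):
 *
 *	trace_puts("reached checkpoint\n");
 */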

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	if (WARN_ON(ret < 0))
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	if (global_trace.trace_buffer.buffer)
		ring_buffer_record_off(global_trace.trace_buffer.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 1;
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracing_is_on - report whether the ring buffers are enabled
 */
int tracing_is_on(void)
{
	if (global_trace.trace_buffer.buffer)
		return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
	return !global_trace.buffer_disabled;
}
EXPORT_SYMBOL_GPL(tracing_is_on);
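
/*
 * Boot-time sizing example for the buffer size handled below (illustrative;
 * memparse() also accepts k/m/g suffixes):
 *
 *	trace_buf_size=16k
 */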

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};
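
/*
 * The active clock is selected at run time through the trace_clock file
 * using one of the names above, e.g. (illustrative, assuming debugfs is
 * mounted at the usual location):
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */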

int trace_clock_id;

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
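
/*
 * Illustrative use from a debugfs write handler (a sketch; it assumes a
 * parser set up elsewhere with trace_parser_get_init()):
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		... act on parser.buffer ...
 */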

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = ftrace_now(buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace.trace_buffer, cpu);
}

void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
	mutex_unlock(&trace_types_lock);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected.  This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)