/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk, could
 * occur at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;
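
/*
 * For example (illustrative, from a root shell, per the description above):
 *
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops   # dump every CPU's buffer
 *   echo 2 > /proc/sys/kernel/ftrace_dump_on_oops   # dump only the CPU that oopsed
 */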

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
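
/*
 * Illustrative read-side pattern (a sketch, not a verbatim caller):
 *
 *	trace_access_lock(cpu);
 *	... peek at or consume events from @cpu's ring buffer ...
 *	trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS as @cpu takes the lock for the whole
 * ring buffer instead of a single cpu buffer.
 */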

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
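
/*
 * Illustrative call (in practice this is normally reached through the
 * trace_puts() macro, which supplies the caller address and the length):
 *
 *	__trace_puts(_THIS_IP_, "hello\n", 6);
 */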

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->str			= str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
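
/*
 * A minimal usage sketch from other kernel code, assuming the snapshot
 * buffer was allocated beforehand (e.g. with tracing_snapshot_alloc()):
 *
 *	if (condition_of_interest)	// hypothetical trigger
 *		tracing_snapshot();	// capture now, keep tracing
 */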

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
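
/*
 * Illustrative use while debugging (a sketch):
 *
 *	if (data_looks_corrupted)	// hypothetical condition
 *		tracing_off();		// freeze the buffers for post-mortem
 */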

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;

	return 1;
}
__setup("trace_buf_size=", set_buf_size);
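
/*
 * For example, on the kernel command line (memparse() accepts size
 * suffixes such as 'k', 'M' and 'G'):
 *
 *   trace_buf_size=1M
 */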

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};
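
/*
 * Each name above corresponds to a bit in trace_flags and can be
 * toggled at run time (illustrative, from a root shell; a "no" prefix
 * clears the option):
 *
 *   echo stacktrace > /sys/kernel/debug/tracing/trace_options
 *   echo nostacktrace > /sys/kernel/debug/tracing/trace_options
 */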

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};
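
/*
 * The clock used to timestamp new trace entries can be switched at run
 * time (illustrative, from a root shell; the trace_clock file lists the
 * registered clocks with the current one in [brackets]):
 *
 *   cat /sys/kernel/debug/tracing/trace_clock
 *   echo global > /sys/kernel/debug/tracing/trace_clock
 */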

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
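
/*
 * Illustrative caller pattern (a sketch; the write handlers for files
 * such as set_ftrace_filter follow this shape):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		;	/* act on the word in parser.buffer */
 *	trace_parser_put(&parser);
 */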

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}