/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
int ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will be cleared to zero if the initialization
 * of the tracer is successful. That is the only place that ever clears it.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

/*
 * ftrace_disable_cpu/ftrace_enable_cpu pause tracing on the local CPU
 * by bumping a per-cpu disable counter (with preemption disabled).
 */
static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	__this_cpu_inc(ftrace_cpu_disabled);
}

static inline void ftrace_enable_cpu(void)
{
	__this_cpu_dec(ftrace_cpu_disabled);
	preempt_enable();
}

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default; it can be enabled either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */
enum ftrace_dump_mode ftrace_dump_on_oops;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static int __init set_cmdline_ftrace(char *str)
{
	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = 1;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

/* ns2usecs - convert nanoseconds to microseconds (rounded to nearest) */
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

/*
 * ftrace_now - return the current trace time stamp for @cpu,
 * normalized against the ring buffer's clock.
 */
cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);

	return ts;
}

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset for
 * the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int			tracer_enabled = 1;

/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag.  Tracers may use this function
 * to know whether they should enable their features when starting
 * up. See the irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
	return tracer_enabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low value of 16384.
 * If a dump on oops happens, you do not want to wait for all
 * of that output, and this is configurable at both boot time
 * and run time anyway.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
static DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish between read-only and read-consume
 * access. Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == TRACE_PIPE_ALL_CPU) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == TRACE_PIPE_ALL_CPU) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
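
/*
 * Typical reader usage (sketch, not taken verbatim from elsewhere in this
 * file): take the lock for the cpu buffer about to be read (or
 * TRACE_PIPE_ALL_CPU for all of them), do the read or splice work, then
 * release it:
 *
 *	trace_access_lock(cpu);
 *	... consume events from the cpu ring buffer ...
 *	trace_access_unlock(cpu);
 */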

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO;

static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);

static void wakeup_work_handler(struct work_struct *work)
{
	wake_up(&trace_wait);
}

static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * trace_wait queue. This is used with trace_poll for tasks polling the
 * trace.
 */
void trace_wake_up(void)
{
	const unsigned long delay = msecs_to_jiffies(2);

	if (trace_flags & TRACE_ITER_BLOCK)
		return;
	schedule_delayed_work(&wakeup_work, delay);
}

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = strict_strtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
} trace_clocks[] = {
	{ trace_clock_local,	"local" },
	{ trace_clock_global,	"global" },
	{ trace_clock_counter,	"counter" },
};
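
/*
 * Which entry of trace_clocks[] is in use is selected by trace_clock_id
 * below; it can be changed at run time through the "trace_clock" file in
 * the tracing debugfs directory.
 */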

int trace_clock_id;

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
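
/*
 * Usage sketch (illustrative only; do_something_with() is a placeholder):
 * a debugfs write handler can pull one whitespace-separated token per call
 * out of the user buffer:
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && parser.idx)
 *		do_something_with(parser.buffer);
 *	trace_parser_put(&parser);
 */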

/*
 * trace_seq_to_user - copy the not-yet-read part of a trace_seq buffer
 * (at most @cnt bytes) into the user buffer @ubuf.
 */
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;
	void *ret;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = memcpy(buf, s->buffer + s->readpos, cnt);
	if (!ret)
		return -EFAULT;

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	struct trace_array_cpu *max_data;

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	max_data = max_tr.data[cpu];
	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	max_data->uid = task_uid(tsk);
	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!current_trace->use_max_tr) {
		WARN_ON_ONCE(1);
		return;
	}
	arch_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!current_trace->use_max_tr) {
		WARN_ON_ONCE(1);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk(&max_tr, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	ftrace_enable_cpu();

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
__releases(kernel_lock)
__acquires(kernel_lock)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest && !tracing_selftest_disabled) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;

		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		tracing_reset_online_cpus(tr);

		current_trace = type;

		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded && type->use_max_tr)
			ring_buffer_resize(max_tr.buffer, trace_buf_size);

		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		tracing_reset_online_cpus(tr);

		/* Shrink the max buffer again */
		if (ring_buffer_expanded && type->use_max_tr)
			ring_buffer_resize(max_tr.buffer, 1);

		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Tracer %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;

	if (type == current_trace && tracer_enabled) {
		tracer_enabled = 0;
		tracing_stop();
		if (current_trace->stop)
			current_trace->stop(&global_trace);
		current_trace = &nop_trace;
	}
out:
	mutex_unlock(&trace_types_lock);
}

static void __tracing_reset(struct ring_buffer *buffer, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(buffer, cpu);
	ftrace_enable_cpu();
}

void tracing_reset(struct trace_array *tr, int cpu)
{
	struct ring_buffer *buffer = tr->buffer;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	__tracing_reset(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_array *tr)
{
	struct ring_buffer *buffer = tr->buffer;
	int cpu;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		__tracing_reset(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace, cpu);
}

void tracing_reset_current_online_cpus(void)
{
	tracing_reset_online_cpus(&global_trace);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
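
/*
 * pid <-> cmdline bookkeeping (see trace_save_cmdline() below):
 * map_pid_to_cmdline indexes a pid into a slot of saved_cmdlines, and
 * map_cmdline_to_pid remembers which pid currently owns each slot so a
 * stale mapping can be cleared before the slot is reused.
 */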
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable command line recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return trace_stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected.  This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count) {
		if (trace_stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			trace_stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	arch_spin_unlock(&ftrace_max_lock);

 out:
	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
	    !tracing_is_on())
		return;

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count		= pc & 0xff;
	entry->pid			= (tsk) ? tsk->pid : 0;
	entry->padding			= 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}
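
/*
 * Writing an event is a two step sequence (sketch): reserve space in the
 * ring buffer, fill in the type specific entry, then commit it:
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 *					  flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *	trace_buffer_unlock_commit(buffer, event, flags, pc);
 *
 * trace_function() below starts with this same reserve step.
 */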

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc,
			     int wake)
{
	ring_buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);

	if (wake)
		trace_wake_up();
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
				       struct ring_buffer_event *event,
				       unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);

void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
					    struct ring_buffer_event *event,
					    unsigned long flags, int pc,
					    struct pt_regs *regs)
{
	ring_buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(