#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,

	__TRACE_LAST_TYPE,
};

#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn) \
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"

/*
 * syscalls are special, and need special handling; this is why
 * they are not included in trace_entries.h
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct tracer;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	cycle_t				time_start;
	int				cpu;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the trace_buffer.
	 * When a snapshot is taken, the buffer of the max_buffer is swapped
	 * with the buffer of the trace_buffer and the buffers are reset for
	 * the trace_buffer so the tracing can continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
	DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
#endif
	int			stop_count;
	int			clock_id;
	struct tracer		*current_trace;
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct list_head	systems;
	struct list_head	events;
	int			ref;
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type() macro verifies that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 *  Where "type" is the trace type that includes the trace_entry
 *  as the "ent" item. And "id" is the trace identifier that is
 *  used in the trace_type enum.
 *
 *  If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);		\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);		\
		__ftrace_bad_type();					\
	} while (0)
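
/*
 * Example (a sketch of how output handlers typically use this): declare
 * a pointer of the expected entry type and let trace_assign_type()
 * verify the id before dereferencing:
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%lx <-- %lx\n",
 *			 field->ip, field->parent_ip);
 */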

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit mask that sets its value in the val
 * field of struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
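
/*
 * Example (hypothetical tracer, illustration only): one private option
 * named "verbose" and its flags container. The empty element terminates
 * the opts array:
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(verbose, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 */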

/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_headers: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct tracer *tracer,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	bool			print_max;
	bool			enabled;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};
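
/*
 * Minimal usage sketch (hypothetical tracer, illustration only; the
 * names are made up):
 *
 *	static int mytrace_init(struct trace_array *tr) { return 0; }
 *	static void mytrace_reset(struct trace_array *tr) { }
 *
 *	static struct tracer mytrace __tracer_data = {
 *		.name	= "mytrace",
 *		.init	= mytrace_init,
 *		.reset	= mytrace_reset,
 *	};
 *
 * and at boot: register_tracer(&mytrace);
 */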

/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* GLOBAL_BITs must be greater than FTRACE_BITs */
	TRACE_GLOBAL_BIT,
	TRACE_GLOBAL_NMI_BIT,
	TRACE_GLOBAL_IRQ_BIT,
	TRACE_GLOBAL_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than GLOBAL_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_CONTROL_BIT,

/*
 * Abuse of the trace_recursion.
 * We need a way to maintain state if we are tracing the function
 * graph in irq, because we may want to trace a particular function
 * that was called in irq context while irq tracing is off. Since this
 * can only be modified by current, we can reuse trace_recursion.
 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_GLOBAL_START	TRACE_GLOBAL_BIT
#define TRACE_GLOBAL_MAX	((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;

		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
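
/*
 * Sketch of the intended call pattern for the helpers above (this is
 * how a function-tracer callback typically guards itself):
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *					   TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;			(recursion detected, bail out)
 *	(... do the actual tracing work ...)
 *	trace_clear_recursion(bit);
 */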

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
						struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void poll_wait_pipe(struct trace_iterator *iter);

void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)
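
/*
 * Example (illustrative sketch): resetting every per-CPU buffer of a
 * trace_buffer, in the style of tracing_reset_online_cpus():
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		tracing_reset(buf, cpu);
 */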

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
					   unsigned long flags, int skip,
					   int pc, struct pt_regs *regs)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_filter_enabled;
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_filter_enabled)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
#endif

int ftrace_event_is_function(struct ftrace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos);
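
/*
 * Sketch of a typical ->write() handler built on the parser (ubuf, cnt
 * and ppos are the handler's arguments; the consume_token() callee is
 * hypothetical):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser))
 *		consume_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 *	return read;
 */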

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that controls the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_PRINTK		= 0x200,
	TRACE_ITER_PREEMPTONLY		= 0x400,
	TRACE_ITER_BRANCH		= 0x800,
	TRACE_ITER_ANNOTATE		= 0x1000,
	TRACE_ITER_USERSTACKTRACE       = 0x2000,
	TRACE_ITER_SYM_USEROBJ          = 0x4000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x20000,
	TRACE_ITER_SLEEP_TIME		= 0x40000,
	TRACE_ITER_GRAPH_TIME		= 0x80000,
	TRACE_ITER_RECORD_CMD		= 0x100000,
	TRACE_ITER_OVERWRITE		= 0x200000,
	TRACE_ITER_STOP_ON_FREE		= 0x400000,
	TRACE_ITER_IRQ_INFO		= 0x800000,
	TRACE_ITER_MARKERS		= 0x1000000,
	TRACE_ITER_FUNCTION		= 0x2000000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct ftrace_subsystem_dir {
	struct list_head		list;
	struct event_subsystem		*subsystem;
	struct trace_array		*tr;
	struct dentry			*entry;
	int				ref_count;
	int				nr_events;
};

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is limited by the size of an unsigned short, with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t 	fn;
	u64 			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int 			offset;
	int 			not;
	int 			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name);

static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
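
/*
 * Typical commit-path usage (sketch): only commit the event when the
 * filter did not discard it:
 *
 *	if (!filter_check_discard(call, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 */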

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as saves the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct ftrace_event_call					\
	__attribute__((__aligned__(4))) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#endif /* _LINUX_KERNEL_TRACE_H */