/* trace.h - private definitions for the kernel tracing subsystem */
#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
Arun Sharma's avatar
Arun Sharma committed
5
#include <linux/atomic.h>
6
7
#include <linux/sched.h>
#include <linux/clocksource.h>
8
#include <linux/ring_buffer.h>
Pekka Paalanen's avatar
Pekka Paalanen committed
9
#include <linux/mmiotrace.h>
10
#include <linux/tracepoint.h>
11
#include <linux/ftrace.h>
12
#include <linux/hw_breakpoint.h>
13
#include <linux/trace_seq.h>
14
#include <linux/ftrace_event.h>
15

16
17
18
19
20
21
22
/*
 * Entry types stored in the trace ring buffer.  Every record begins
 * with a struct trace_entry whose ->type holds one of these values so
 * that readers can decode the payload that follows.
 */
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,

	__TRACE_LAST_TYPE,
};

36

37
38
/*
 * Helper macros consumed by the FTRACE_ENTRY() expansion pass below to
 * turn an event description into a C struct body.  The *_desc variants
 * expand to nothing in this pass; they matter only to the passes that
 * export the event format.
 */
#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

/* Trailing variable-length data uses a C99 flexible array member. */
#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args
57

58
/*
 * FTRACE_ENTRY() - default expansion: declare the C struct for one
 * event, embedding the common trace_entry header.  The print and
 * filter arguments are unused by this pass.
 */
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

/* Duplicate entries reuse an already-declared struct: expand to nothing. */
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

/* Entries with a registration function use the plain expansion here. */
#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn) \
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
76

77
#include "trace_entries.h"
78

79
80
81
82
/*
 * syscalls are special, and need special handling, this is why
 * they are not included in trace_entries.h
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;	/* syscall number */
	unsigned long		args[];	/* syscall arguments, count known from nr */
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;	/* syscall number */
	long			ret;	/* value returned by the syscall */
};

95
struct kprobe_trace_entry_head {
96
97
98
99
	struct trace_entry	ent;
	unsigned long		ip;
};

100
struct kretprobe_trace_entry_head {
101
102
103
104
105
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

106
107
108
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

123
/* Size in bytes of the temporary buffer used for trace_printk()-style text. */
#define TRACE_BUF_SIZE		1024
124
125
126
127
128
129
130
131

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;
132
	void			*buffer_page;	/* ring buffer spare */
Ingo Molnar's avatar
Ingo Molnar committed
133

134
135
136
137
138
139
140
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
141
	unsigned long		skipped_entries;
142
143
144
145
146
147
148
149
150
151
152
153
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
154
	struct ring_buffer	*buffer;
155
156
	unsigned long		entries;
	int			cpu;
157
	int			buffer_disabled;
158
	cycle_t			time_start;
159
	struct task_struct	*waiter;
160
161
162
	struct trace_array_cpu	*data[NR_CPUS];
};

163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
/* Non-zero when @var's type is "@type *" (GCC builtin, compile-time). */
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 *  Where "type" is the trace type that includes the trace_entry
 *  as the "ent" item. And "id" is the trace identifier that is
 *  used in the trace_type enum.
 *
 *  If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);		\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);		\
		__ftrace_bad_type();					\
	} while (0)
209

210
211
212
213
214
215
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
216
217
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
218
219
220
221
222
223
224
225
};

/*
 * The set of specific options for a tracer. Your tracer
 * have to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;	/* current state of the option bits */
	struct tracer_opt	*opts;	/* table of available options */
};

/* Makes more easy to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

232

233
234
235
236
237
238
239
240
241
242
243
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
244
 * @pipe_close: called when the trace_pipe file is released
245
246
247
248
249
250
251
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_headers: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
252
253
254
 */
struct tracer {
	const char		*name;
255
	int			(*init)(struct trace_array *tr);
256
	void			(*reset)(struct trace_array *tr);
257
258
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
259
	void			(*open)(struct trace_iterator *iter);
260
	void			(*pipe_open)(struct trace_iterator *iter);
261
	void			(*wait_pipe)(struct trace_iterator *iter);
262
	void			(*close)(struct trace_iterator *iter);
263
	void			(*pipe_close)(struct trace_iterator *iter);
264
265
266
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
267
268
269
270
271
272
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
Steven Rostedt's avatar
Steven Rostedt committed
273
274
275
276
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
277
	void			(*print_header)(struct seq_file *m);
278
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
279
280
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
281
	struct tracer		*next;
282
	struct tracer_flags	*flags;
283
	int			print_max;
284
	int			use_max_tr;
285
286
};

287

288
289
290
291
292
293
294
295
296
297
/* Only current can touch trace_recursion */
#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)

/* Ring buffer has the 10 LSB bits to count */
#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)

/* for function tracing recursion */
#define TRACE_INTERNAL_BIT		(1<<11)
#define TRACE_GLOBAL_BIT		(1<<12)
#define TRACE_CONTROL_BIT		(1<<13)

/*
 * Abuse of the trace_recursion.
 * As we need a way to maintain state if we are tracing the function
 * graph in irq because we want to trace a particular function that
 * was called in irq context but we have irq tracing off. Since this
 * can only be modified by current, we can reuse trace_recursion.
 *
 * NOTE: this bit must be distinct from all recursion bits above.  It
 * previously aliased TRACE_CONTROL_BIT (1<<13), which let the
 * control-ops recursion check clobber the irq-graph state.
 */
#define TRACE_IRQ_BIT			(1<<14)

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (bit); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(bit); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (bit))

#define TRACE_PIPE_ALL_CPU	-1

315
int tracer_init(struct tracer *t, struct trace_array *tr);
316
int tracing_is_enabled(void);
317
void trace_wake_up(void);
318
void tracing_reset(struct trace_array *tr, int cpu);
319
void tracing_reset_online_cpus(struct trace_array *tr);
320
321
void tracing_reset_current(int cpu);
void tracing_reset_current_online_cpus(void);
322
int tracing_open_generic(struct inode *inode, struct file *filp);
323
struct dentry *trace_create_file(const char *name,
Al Viro's avatar
Al Viro committed
324
				 umode_t mode,
325
326
327
328
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

329
struct dentry *tracing_init_dentry(void);
Ingo Molnar's avatar
Ingo Molnar committed
330

331
332
struct ring_buffer_event;

333
334
335
336
337
338
339
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
340
341
342
				struct ring_buffer_event *event,
				unsigned long flags, int pc);

343
344
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
						struct trace_array_cpu *data);
345
346
347
348

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

349
350
351
352
353
354
355
356
int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

357
358
359
void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);

360
361
362
363
void ftrace(struct trace_array *tr,
			    struct trace_array_cpu *data,
			    unsigned long ip,
			    unsigned long parent_ip,
364
			    unsigned long flags, int pc);
365
366
367
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
368
				unsigned long flags, int pc);
369
370
371
372

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
373
				unsigned long flags, int pc);
374
375
376
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
377
		    unsigned long flags, int pc);
378
379
380
381
void trace_graph_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
382
void trace_latency_header(struct seq_file *m);
383
384
385
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);
386

387
void trace_graph_return(struct ftrace_graph_ret *trace);
388
int trace_graph_entry(struct ftrace_graph_ent *trace);
389
void set_graph_array(struct trace_array *tr);
390

391
392
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
Steven Rostedt's avatar
Steven Rostedt committed
393
394
395
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
396
397
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
398
int is_tracing_stopped(void);
399
400
401
402
403
404
405
406
407
/* Per-open-file iterator flags (stored in the trace iterator). */
enum trace_file_type {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
};

extern cpumask_var_t __read_mostly tracing_buffer_mask;

/* Iterate @cpu over every CPU covered by the tracing buffers. */
#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)
408
409
410

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

411
412
extern unsigned long tracing_thresh;

413
#ifdef CONFIG_TRACER_MAX_TRACE
414
415
416
417
418
extern unsigned long tracing_max_latency;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
419
#endif /* CONFIG_TRACER_MAX_TRACE */
420

421
#ifdef CONFIG_STACKTRACE
422
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
423
424
			int skip, int pc);

425
426
427
void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs);

428
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
429
430
431
432
433
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
434
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
435
436
437
438
				      unsigned long flags, int skip, int pc)
{
}

439
440
441
442
443
444
static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
					   unsigned long flags, int skip,
					   int pc, struct pt_regs *regs)
{
}

445
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
446
447
448
449
450
451
452
453
454
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */
455

Ingo Molnar's avatar
Ingo Molnar committed
456
extern cycle_t ftrace_now(int cpu);
457

458
extern void trace_find_cmdline(int pid, char comm[]);
459

460
461
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
462
463
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
464
465
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);
466
467
#endif

468
469
extern int ring_buffer_expanded;
extern bool tracing_selftest_disabled;
470
DECLARE_PER_CPU(int, ftrace_cpu_disabled);
471

Steven Rostedt's avatar
Steven Rostedt committed
472
473
474
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
475
476
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
Steven Rostedt's avatar
Steven Rostedt committed
477
478
479
480
481
482
483
484
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
Steven Noonan's avatar
Steven Noonan committed
485
486
extern int trace_selftest_startup_nop(struct tracer *trace,
					 struct trace_array *tr);
Steven Rostedt's avatar
Steven Rostedt committed
487
488
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
489
490
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
Steven Rostedt's avatar
Steven Rostedt committed
491
492
#endif /* CONFIG_FTRACE_STARTUP_TEST */

Ingo Molnar's avatar
Ingo Molnar committed
493
extern void *head_page(struct trace_array_cpu *data);
494
extern unsigned long long ns2usecs(cycle_t nsec);
495
extern int
496
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
497
extern int
498
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
499
500
501
502
503
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
504
505
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);
Ingo Molnar's avatar
Ingo Molnar committed
506

507
508
extern unsigned long trace_flags;

509
510
extern int trace_clock_id;

511
/* Standard output formatting function used for function return traces */
512
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
513
514
515
516
517
518
519
520
521

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20

522
523
524
extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
525
526
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
527
528
529
530
531
532
533
534
535
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);

536
537
538
539

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
540
extern int ftrace_graph_filter_enabled;
541
542
543
544
545
546
547
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

548
	if (!ftrace_graph_filter_enabled)
549
550
551
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
552
553
554
555
556
557
558
559
560
561
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
562
			return 1;
563
		}
564
565
566
567
	}

	return 0;
}
568
#else
569
570
571
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
572
573
574
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
575
static inline enum print_line_t
576
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
577
578
579
{
	return TRACE_TYPE_UNHANDLED;
}
580
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
581

582
extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Return non-zero if @task should be traced: either no PID filter is
 * installed (ftrace_pids is empty) or the task carries the per-task
 * trace flag.
 */
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
#endif
600

601
602
int ftrace_event_is_function(struct ftrace_event_call *call);

603
604
605
606
/*
 * struct trace_parser - servers for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

/* Non-zero when some parsed input is buffered. */
static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

/* True when the last read stopped mid-token and more input is expected. */
static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

/* Drop buffered input and clear the continuation state. */
static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos);

Steven Rostedt's avatar
Steven Rostedt committed
638
639
640
641
642
643
644
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that controls the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_PRINTK		= 0x200,
	TRACE_ITER_PREEMPTONLY		= 0x400,
	TRACE_ITER_BRANCH		= 0x800,
	TRACE_ITER_ANNOTATE		= 0x1000,
	TRACE_ITER_USERSTACKTRACE       = 0x2000,
	TRACE_ITER_SYM_USEROBJ          = 0x4000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x20000,
	TRACE_ITER_SLEEP_TIME		= 0x40000,
	TRACE_ITER_GRAPH_TIME		= 0x80000,
	TRACE_ITER_RECORD_CMD		= 0x100000,
	TRACE_ITER_OVERWRITE		= 0x200000,
	TRACE_ITER_STOP_ON_FREE		= 0x400000,
	TRACE_ITER_IRQ_INFO		= 0x800000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

679
680
extern struct tracer nop_trace;

681
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);

/* Enable branch tracing only when the "branch" trace option is set. */
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}

static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
/* Branch tracer not built in: both operations are no-ops. */
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
704

705
706
707
/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

708
709
710
711
712
713
/* trace event type bit fields, not numeric */
enum {
	TRACE_EVENT_TYPE_PRINTF		= 1,
	TRACE_EVENT_TYPE_RAW		= 2,
};

714
715
716
717
/* Describes a single field of a trace event record. */
struct ftrace_event_field {
	struct list_head	link;
	char			*name;
	char			*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

724
/* A compiled event filter: predicate storage plus its source text. */
struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

732
733
734
735
/* A group of trace events sharing a debugfs directory and a filter. */
struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct dentry		*entry;
	struct event_filter	*filter;
	int			nr_events;
	int			ref_count;
};

741
742
#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384
753

Tom Zanussi's avatar
Tom Zanussi committed
754
struct filter_pred;
struct regex;

/* Evaluate one predicate against a binary event record. */
typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

/* How a filter pattern is matched against a string field. */
enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

768
769
770
771
772
773
774
/* A filter string pattern and the match function selected for it. */
struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

Tom Zanussi's avatar
Tom Zanussi committed
775
struct filter_pred {
776
777
778
	filter_pred_fn_t 	fn;
	u64 			val;
	struct regex		regex;
779
	unsigned short		*ops;
780
	struct ftrace_event_field *field;
781
782
783
	int 			offset;
	int 			not;
	int 			op;
784
785
786
787
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
Tom Zanussi's avatar
Tom Zanussi committed
788
789
};

790
791
extern struct list_head ftrace_common_fields;

792
793
extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
794
extern void print_event_filter(struct ftrace_event_call *call,
795
			       struct trace_seq *s);
796
797
798
799
800
extern int apply_event_filter(struct ftrace_event_call *call,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct event_subsystem *system,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
801
					 struct trace_seq *s);
802
extern int filter_assign_type(const char *type);
Tom Zanussi's avatar
Tom Zanussi committed
803

804
805
806
struct list_head *
trace_get_fields(struct ftrace_event_call *event_call);

807
static inline int
808
filter_check_discard(struct ftrace_event_call *call, void *rec,
809
		     struct ring_buffer *buffer,
810
811
		     struct ring_buffer_event *event)
{
812
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
Li Zefan's avatar
Li Zefan committed
813
	    !filter_match_preds(call->filter, rec)) {
814
815
816
817
818
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
819
820
}

821
822
extern void trace_event_enable_cmd_record(bool enable);

823
extern struct mutex event_mutex;
824
extern struct list_head ftrace_events;
Peter Zijlstra's avatar
Peter Zijlstra committed
825

826
827
828
extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

829
/*
 * Second expansion pass over trace_entries.h: declare the
 * ftrace_event_call object (event_<call>) for every entry.
 */
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct ftrace_event_call					\
	__attribute__((__aligned__(4))) event_##call;

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"
838

839
840
841
842
843
844
845
/* perf's event-register hook; absent when the function tracer is not built. */
#ifdef CONFIG_FUNCTION_TRACER
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif /* CONFIG_FUNCTION_TRACER */

846
#endif /* _LINUX_KERNEL_TRACE_H */