#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,

	__TRACE_LAST_TYPE,
};

#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn) \
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
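/*
 * Illustrative sketch (not part of the real header): with the macros
 * above, an entry in trace_entries.h written roughly as
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		F_printk(...), FILTER_TRACE_FN)
 *
 * expands here into a plain C structure:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */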

#include "trace_entries.h"

/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	int			cpu;
	int			buffer_disabled;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 *  Where "type" is the trace type that includes the trace_entry
 *  as the "ent" item. And "id" is the trace identifier that is
 *  used in the trace_type enum.
 *
 *  If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);		\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);		\
		__ftrace_bad_type();					\
	} while (0)
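/*
 * Usage sketch (illustrative only): an output handler typically pulls a
 * typed entry out of the iterator like this, assuming a hypothetical
 * handler for TRACE_FN events:
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%lx <-- %lx\n",
 *			 field->ip, field->parent_ip);
 */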

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
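/*
 * Illustrative sketch (hypothetical names): a tracer declares its
 * private options with TRACER_OPT() and hangs them off a tracer_flags:
 *
 *	static struct tracer_opt my_tracer_opts[] = {
 *		{ TRACER_OPT(my_option, 0x1) },
 *		{ }	(terminating empty entry)
 *	};
 *
 *	static struct tracer_flags my_tracer_flags = {
 *		.val  = 0,
 *		.opts = my_tracer_opts,
 *	};
 */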

/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_headers: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct tracer *tracer,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	bool			print_max;
	bool			use_max_tr;
	bool			allocated_snapshot;
	bool			enabled;
};
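/*
 * Illustrative sketch (hypothetical names, not from this file): a minimal
 * tracer only needs a name plus init/reset callbacks before being handed
 * to register_tracer(), declared further down in this header:
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	ret = register_tracer(&my_tracer);
 */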

/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are taken:
 *   If the arch does not support an ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If the callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits,
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* GLOBAL_BITs must be greater than FTRACE_BITs */
	TRACE_GLOBAL_BIT,
	TRACE_GLOBAL_NMI_BIT,
	TRACE_GLOBAL_IRQ_BIT,
	TRACE_GLOBAL_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than GLOBAL_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_CONTROL_BIT,

/*
 * Abuse of the trace_recursion.
 * We need a way to maintain state if we are tracing the function
 * graph in irq context, because we want to trace a particular function
 * that was called in irq context even though we have irq tracing off.
 * Since this can only be modified by current, we can reuse
 * trace_recursion.
 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_GLOBAL_START	TRACE_GLOBAL_BIT
#define TRACE_GLOBAL_MAX	((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;

		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
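/*
 * Usage sketch (illustrative, hypothetical callback name): a function
 * tracing callback guards against recursion in the current context with
 * the helpers above, e.g.:
 *
 *	static void my_ftrace_callback(unsigned long ip, unsigned long pip)
 *	{
 *		int bit;
 *
 *		bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *						   TRACE_FTRACE_MAX);
 *		if (bit < 0)
 *			return;		(already recursing in this context)
 *
 *		(do the actual tracing work here)
 *
 *		trace_clear_recursion(bit);
 *	}
 */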

#define TRACE_PIPE_ALL_CPU	-1

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
void tracing_reset_current(int cpu);
void tracing_reset_current_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
						struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void poll_wait_pipe(struct trace_iterator *iter);

void ftrace(struct trace_array *tr,
			    struct trace_array_cpu *data,
			    unsigned long ip,
			    unsigned long parent_ip,
			    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
					   unsigned long flags, int skip,
					   int pc, struct pt_regs *regs)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern int ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern unsigned long trace_flags;

extern int trace_clock_id;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);


#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_filter_enabled;
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_filter_enabled)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
#endif

int ftrace_event_is_function(struct ftrace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos);
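/*
 * Usage sketch (illustrative; my_process_token() is hypothetical): a
 * debugfs write handler typically feeds user input through the parser
 * one whitespace-separated token at a time:
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		my_process_token(parser.buffer, parser.idx);
 *		trace_parser_clear(&parser);
 *	}
 *
 *	trace_parser_put(&parser);
 */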

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that controls the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_PRINTK		= 0x200,
	TRACE_ITER_PREEMPTONLY		= 0x400,
	TRACE_ITER_BRANCH		= 0x800,
	TRACE_ITER_ANNOTATE		= 0x1000,
	TRACE_ITER_USERSTACKTRACE       = 0x2000,
	TRACE_ITER_SYM_USEROBJ          = 0x4000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x20000,
	TRACE_ITER_SLEEP_TIME		= 0x40000,
	TRACE_ITER_GRAPH_TIME		= 0x80000,
	TRACE_ITER_RECORD_CMD		= 0x100000,
	TRACE_ITER_OVERWRITE		= 0x200000,
	TRACE_ITER_STOP_ON_FREE		= 0x400000,
	TRACE_ITER_IRQ_INFO		= 0x800000,
	TRACE_ITER_MARKERS		= 0x1000000,
};
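/*
 * Illustrative example (sketch only): tracing code elsewhere tests these
 * bits against the global trace_flags mask, e.g.
 *
 *	if (trace_flags & TRACE_ITER_STACKTRACE)
 *		ftrace_trace_stack(buffer, irq_flags, skip, pc);
 */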

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

/* trace event type bit fields, not numeric */
enum {
	TRACE_EVENT_TYPE_PRINTF		= 1,
	TRACE_EVENT_TYPE_RAW		= 2,
};

struct ftrace_event_field {
	struct list_head	link;
	char			*name;
	char			*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct dentry		*entry;
	struct event_filter	*filter;
	int			nr_events;
	int			ref_count;
};

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t 	fn;
	u64 			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int 			offset;
	int 			not;
	int 			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

extern struct list_head ftrace_common_fields;

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct event_subsystem *system,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);

struct list_head *
trace_get_fields(struct ftrace_event_call *event_call);

static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
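/*
 * Usage sketch (illustrative): event commit paths pair this check with
 * the buffer commit, discarding the event when the filter rejects it:
 *
 *	if (!filter_check_discard(call, entry, buffer, event))
 *		__buffer_unlock_commit(buffer, event);
 */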

extern void trace_event_enable_cmd_record(bool enable);

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(unsigned int mask, int enabled);

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct ftrace_event_call					\
	__attribute__((__aligned__(4))) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#endif /* _LINUX_KERNEL_TRACE_H */