#ifndef _LINUX_FTRACE_EVENT_H
#define _LINUX_FTRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

struct trace_array;
struct tracer;
struct dentry;

struct trace_print_flags {
	unsigned long		mask;
	const char		*name;
};

struct trace_print_flags_u64 {
	unsigned long long	mask;
	const char		*name;
};

const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
				   unsigned long flags,
				   const struct trace_print_flags *flag_array);

const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				     const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
					 unsigned long long val,
					 const struct trace_print_flags_u64
								 *symbol_array);
#endif

const char *ftrace_print_hex_seq(struct trace_seq *p,
				 const unsigned char *buf, int len);
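
/*
 * These helpers back the __print_flags()/__print_symbolic() constructs used
 * in event output formats.  As an illustrative sketch (not part of this
 * header), calling the flags helper directly from a print handler could look
 * roughly like the following; "iter", "entry", "my_flags" and the flag values
 * are hypothetical, and the array is terminated with a { -1, NULL } entry
 * (the helper stops at a NULL name):
 *
 *	static const struct trace_print_flags my_flag_names[] = {
 *		{ 0x01, "READ"  },
 *		{ 0x02, "WRITE" },
 *		{ -1,   NULL    }
 *	};
 *
 *	trace_seq_printf(&iter->seq, "flags=%s",
 *			 ftrace_print_flags_seq(&iter->tmp_seq, "|",
 *						entry->my_flags,
 *						my_flag_names));
 */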

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

#define FTRACE_MAX_EVENT						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
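/*
 * Since trace_entry.type above is an unsigned short (16 bits), this works
 * out to (1 << 16) - 1 = 65535 usable event type ids.
 */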

/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; these routines may sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	cpumask_var_t		started;

	/* true when the currently open file is a snapshot */
	bool			snapshot;
};

enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};


struct trace_event;

typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
				      int flags, struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event {
	struct hlist_node		node;
	struct list_head		list;
	int				type;
	struct trace_event_functions	*funcs;
};

extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
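
/*
 * A hedged sketch (not from this header) of wiring a custom record type into
 * the output path: the callbacks format iter->ent into iter->seq and return
 * one of the print_line_t codes above, while register_ftrace_event() assigns
 * (or validates) the type id, returning it on success and 0 on failure.
 * All "my_*" names are illustrative.
 *
 *	static enum print_line_t my_trace_output(struct trace_iterator *iter,
 *						 int flags,
 *						 struct trace_event *event)
 *	{
 *		struct trace_entry *entry = iter->ent;
 *
 *		if (!trace_seq_printf(&iter->seq, "my event: pid=%d\n",
 *				      entry->pid))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *
 *		return TRACE_TYPE_HANDLED;
 *	}
 *
 *	static struct trace_event_functions my_event_funcs = {
 *		.trace	= my_trace_output,
 *	};
 *
 *	static struct trace_event my_trace_event = {
 *		.funcs	= &my_event_funcs,
 *	};
 *
 *	int type = register_ftrace_event(&my_trace_event);
 */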

void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
struct ftrace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
				  int type, unsigned long len,
				  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event);
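
/*
 * The reserve/commit pairs above follow the usual ring-buffer pattern:
 * reserve space for the record, fill in the payload returned by
 * ring_buffer_event_data(), then commit (or discard) it.  A rough sketch,
 * assuming a hypothetical struct my_entry whose first member is the
 * struct trace_entry header and an illustrative my_event_type id:
 *
 *	struct ring_buffer *buffer;
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer, my_event_type,
 *						  sizeof(*entry),
 *						  irq_flags, pc);
 *	if (!event)
 *		return;
 *
 *	entry = ring_buffer_event_data(event);
 *	entry->value = value;
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 */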

void tracing_record_cmdline(struct task_struct *tsk);

struct event_filter;

enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct ftrace_event_call;

struct ftrace_event_class {
	char			*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct ftrace_event_call *event,
				       enum trace_reg type, void *data);
	int			(*define_fields)(struct ftrace_event_call *);
	struct list_head	*(*get_fields)(struct ftrace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct ftrace_event_call *);
};

extern int ftrace_event_reg(struct ftrace_event_call *event,
			    enum trace_reg type, void *data);
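
/*
 * ftrace_event_reg() is the stock ->reg() implementation; it services the
 * TRACE_REG_* requests above using the class's ->probe (and ->perf_probe)
 * pointers.  A hypothetical class wired up that way might look like the
 * sketch below (my_event_define_fields and my_event_probe are placeholders):
 *
 *	static struct ftrace_event_class my_event_class = {
 *		.system		= "my_subsys",
 *		.define_fields	= my_event_define_fields,
 *		.fields		= LIST_HEAD_INIT(my_event_class.fields),
 *		.raw_init	= trace_event_raw_init,
 *		.probe		= my_event_probe,
 *		.reg		= ftrace_event_reg,
 *	};
 */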

enum {
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
};

/*
 * Event flags:
 *  FILTERED	  - The event has a filter attached
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when the filter has an error and is to be ignored
 *  IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
 */
enum {
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
};

struct ftrace_event_call {
	struct list_head	list;
	struct ftrace_event_class *class;
	char			*name;
	struct trace_event	event;
	const char		*print_fmt;
	struct event_filter	*filter;
	struct list_head	*files;
	void			*mod;
	void			*data;
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
#endif
};
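
/*
 * Tying the pieces together, a statically defined event call roughly mirrors
 * what the TRACE_EVENT() machinery generates; my_event_class and
 * my_event_funcs refer to the earlier sketches and the print_fmt string is
 * purely illustrative:
 *
 *	static struct ftrace_event_call event_my_event = {
 *		.name		= "my_event",
 *		.class		= &my_event_class,
 *		.event.funcs	= &my_event_funcs,
 *		.print_fmt	= "\"value=%d\", REC->value",
 *	};
 */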

struct trace_array;
struct ftrace_subsystem_dir;

enum {
	FTRACE_EVENT_FL_ENABLED_BIT,
	FTRACE_EVENT_FL_RECORDED_CMD_BIT,
};

/*
 * Ftrace event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 */
enum {
	FTRACE_EVENT_FL_ENABLED		= (1 << FTRACE_EVENT_FL_ENABLED_BIT),
	FTRACE_EVENT_FL_RECORDED_CMD	= (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
};

struct ftrace_event_file {
	struct list_head		list;
	struct ftrace_event_call	*event_call;
	struct dentry			*dir;
	struct trace_array		*tr;
	struct ftrace_subsystem_dir	*system;

	/*
	 * 32 bit flags:
	 *   bit 1:		enabled
	 *   bit 2:		filter_active
	 *   bit 3:		enabled cmd record
	 *   bit 4:		allow trace by non root (cap any)
	 *   bit 5:		failed to apply filter
	 *   bit 6:		ftrace internal event (do not enable)
	 *
	 * Changes to flags must hold the event_mutex.
	 *
	 * Note: Reads of flags do not hold the event_mutex since
	 * they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such.
	 */
	unsigned int		flags;
};
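
/*
 * A minimal sketch of the locking rule described above: flag updates are
 * made with event_mutex held (event_mutex lives in the trace core, not in
 * this header), e.g. for a hypothetical struct ftrace_event_file *file:
 *
 *	mutex_lock(&event_mutex);
 *	file->flags |= FTRACE_EVENT_FL_ENABLED;
 *	mutex_unlock(&event_mutex);
 */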

#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)			\
	{								\
		event_##name.flags = value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);
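
/*
 * The macro assumes the event call follows the event_<name> naming convention
 * used by the TRACE_EVENT() machinery.  For example, letting any user enable
 * the (hypothetical) my_event for perf could look like:
 *
 *	__TRACE_EVENT_FLAGS(my_event, TRACE_EVENT_FL_CAP_ANY)
 */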

#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */

extern void destroy_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct event_filter *filter, void *rec);
extern int filter_current_check_discard(struct ring_buffer *buffer,
					struct ftrace_event_call *call,
					void *rec,
					struct ring_buffer_event *event);
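
/*
 * In the event probes this is typically called just before the commit, so
 * that records rejected by the event's filter are discarded rather than
 * written out.  Continuing the reserve/commit sketch above, with event_call
 * pointing at the event's struct ftrace_event_call:
 *
 *	if (!filter_current_check_discard(buffer, event_call, entry, event))
 *		trace_current_buffer_unlock_commit(buffer, event,
 *						   irq_flags, pc);
 */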

enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
};

#define EVENT_STORAGE_SIZE 128
extern struct mutex event_storage_mutex;
extern char event_storage[EVENT_STORAGE_SIZE];

extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
extern void trace_remove_event_call(struct ftrace_event_call *call);

#define is_signed_type(type)	(((type)(-1)) < (type)0)
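
/*
 * A hedged sketch of a ->define_fields() callback for the hypothetical
 * struct my_entry used earlier: each payload member is described so the
 * filter code knows its type, offset, size and signedness.  The call is then
 * made visible with trace_add_event_call(&event_my_event).
 *
 *	static int my_event_define_fields(struct ftrace_event_call *call)
 *	{
 *		return trace_define_field(call, "int", "value",
 *					  offsetof(struct my_entry, value),
 *					  sizeof(int), is_signed_type(int),
 *					  FILTER_OTHER);
 *	}
 */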

int trace_set_clr_event(const char *system, const char *event, int set);
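
/*
 * Illustrative use from built-in kernel code, much like writing to the
 * set_event file: enable (set=1) or disable (set=0) one event by
 * system/event name.
 *
 *	int ret = trace_set_clr_event("my_subsys", "my_event", 1);
 */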

/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to initialize the static variable with fmt when fmt is not
 * a constant, even though the outer if statement would be optimized out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
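
/*
 * A hypothetical call site; _THIS_IP_ (from linux/kernel.h) is the usual
 * choice for the ip argument so the output is attributed to the caller:
 *
 *	event_trace_printk(_THIS_IP_, "my_event: value=%d\n", value);
 */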

#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *perf_trace_buf_prepare(int size, unsigned short type,
				    struct pt_regs *regs, int *rctxp);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		       u64 count, struct pt_regs *regs, void *head,
		       struct task_struct *task)
{
	perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
}
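
/*
 * A rough sketch (with simplified sizing) of the perf side of an event
 * probe: reserve a per-cpu buffer slot with perf_trace_buf_prepare(), fill
 * it, then hand it to perf with perf_trace_buf_submit().  The record size
 * must stay below PERF_MAX_TRACE_SIZE; my_entry, value and event_call are
 * the placeholders used earlier.
 *
 *	struct my_entry *entry;
 *	struct hlist_head *head;
 *	struct pt_regs regs;
 *	int size, rctx;
 *
 *	size = ALIGN(sizeof(*entry), sizeof(u64));
 *
 *	perf_fetch_caller_regs(&regs);
 *
 *	entry = perf_trace_buf_prepare(size, event_call->event.type,
 *				       &regs, &rctx);
 *	if (!entry)
 *		return;
 *
 *	entry->value = value;
 *
 *	head = this_cpu_ptr(event_call->perf_events);
 *	perf_trace_buf_submit(entry, size, rctx, 0, 1, &regs, head, NULL);
 */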
#endif

#endif /* _LINUX_FTRACE_EVENT_H */