/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
	!ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif
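
/*
 * An arch whose mcount/fentry trampoline passes the ftrace_ops to the
 * callback advertises that in its asm/ftrace.h (a sketch of the opt-in,
 * not a definitive list of what any particular arch defines):
 *
 *	#define ARCH_SUPPORTS_FTRACE_OPS 1
 */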


struct module;
struct ftrace_hash;

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * GLOBAL  - set manually by ftrace_ops user to denote the ftrace_ops
 *           is part of the global tracers sharing the same filter
 *           via set_ftrace_* debugfs files.
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops
 *           could be controlled by following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each function call
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (ARCH_SUPPORTS_FTRACE_SAVE_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops,
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback has its own recursion protection. If it does
 *            not set this, then the ftrace infrastructure will add recursion
 *            protection for the caller.
 */
enum {
	FTRACE_OPS_FL_ENABLED			= 1 << 0,
	FTRACE_OPS_FL_GLOBAL			= 1 << 1,
	FTRACE_OPS_FL_DYNAMIC			= 1 << 2,
	FTRACE_OPS_FL_CONTROL			= 1 << 3,
	FTRACE_OPS_FL_SAVE_REGS			= 1 << 4,
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 5,
	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 6,
};

struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops		*next;
	unsigned long			flags;
	int __percpu			*disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_hash		*notrace_hash;
	struct ftrace_hash		*filter_hash;
#endif
};

extern int function_trace_stop;

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/**
 * ftrace_stop - stop function tracer.
 *
 * A quick way to stop the function tracer. Note this is an on/off switch;
 * it is not recursive like preempt_disable.
 * This does not disable the calling of mcount; it only stops the
 * calling of functions from mcount.
 */
static inline void ftrace_stop(void)
{
	function_trace_stop = 1;
}

/**
 * ftrace_start - start the function tracer.
 *
 * This function is the inverse of ftrace_stop. This does not enable
 * function tracing if the function tracer is disabled. This only
 * sets the function tracer flag to continue calling the functions
 * from mcount.
 */
static inline void ftrace_start(void)
{
	function_trace_stop = 0;
}

/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
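
/*
 * Minimal usage sketch (the names "my_callback" and "my_ops" are
 * hypothetical, not part of this header):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		...called for every traced function; keep it fast...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */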

/**
 * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
 *
 * This function enables tracing on current cpu by decreasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	(*this_cpu_ptr(ops->disabled))--;
}

/**
 * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
 *
 * This function disables tracing on current cpu by increasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	(*this_cpu_ptr(ops->disabled))++;
}

/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns value of ftrace_ops::disabled on current cpu.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
	return *this_cpu_ptr(ops->disabled);
}
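
/*
 * A sketch of toggling a CONTROL ftrace_ops from a preempt-disabled
 * context ("my_ops" is hypothetical, registered with
 * FTRACE_OPS_FL_CONTROL):
 *
 *	preempt_disable();
 *	ftrace_function_local_disable(&my_ops);
 *	...section where the controlled callback is skipped on this cpu...
 *	ftrace_function_local_enable(&my_ops);
 *	preempt_enable();
 */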

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
#endif

struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

void ftrace_bug(int err, unsigned long ip);

struct seq_file;

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					void **data);
	int			(*callback)(unsigned long ip, void **data);
	void			(*free)(void **data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);
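
/*
 * A usage sketch (hypothetical names; "data" is the per-probe cookie
 * passed at registration and handed back to the handlers):
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *		...runs each time a function matching the glob is hit...
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe,
 *	};
 *
 *	register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 */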

extern int ftrace_text_reserved(void *start, void *end);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, 0..FTRACE_REF_MAX, is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS bit is set. When the function has been
 * set up to save regs, the REGS_EN bit is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 29),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 31)
};

#define FTRACE_FL_MASK		(0x7UL << 29)
#define FTRACE_REF_MAX		((1UL << 29) - 1)

struct dyn_ftrace {
	union {
		unsigned long		ip; /* address of mcount call-site */
		struct dyn_ftrace	*freelist;
	};
	unsigned long		flags;
	struct dyn_arch_ftrace		arch;
};
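
/*
 * For example, the two parts described above are read out of rec->flags
 * like this (a sketch):
 *
 *	unsigned long ref_count = rec->flags & FTRACE_REF_MAX;
 *	bool enabled = rec->flags & FTRACE_FL_ENABLED;
 */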

int ftrace_force_update(void);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
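
/*
 * Filters are typically installed before registering the ops, e.g.
 * (a sketch; "my_ops" is hypothetical and the buffer must be writable):
 *
 *	char buf[] = "kmalloc";
 *
 *	ftrace_set_filter(&my_ops, buf, strlen(buf), 1);
 *	register_ftrace_function(&my_ops);
 */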

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);
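
/*
 * A sketch of adding a command usable from set_ftrace_filter as
 * "<function>:mycmd[:params]" (hypothetical handler and names):
 *
 *	static int my_cmd_func(struct ftrace_hash *hash, char *func,
 *			       char *cmd, char *params, int enable)
 *	{
 *		...parse params and act on func...
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_func,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 */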

enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - Stop saving regs for the function
 *  MODIFY_CALL_REGS - Start saving regs for the function
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MODIFY_CALL_REGS,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_HASH	= (1 << 3),
	FTRACE_ITER_HASH	= (1 << 4),
	FTRACE_ITER_ENABLED	= (1 << 5),
};

void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
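
/*
 * For example, an arch batching its code updates could walk every
 * record like this (a sketch; this is meant to run from the arch's
 * code-update path, e.g. its arch_ftrace_update_code()):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		...patch the call site at rec->ip...
 *	}
 */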


int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
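
/*
 * A minimal sketch of the read-compare-write pattern described above,
 * loosely modeled on x86 ("expected" and "new_code" stand for arch-built
 * instruction sequences and are hypothetical):
 *
 *	unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *	if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *		return -EFAULT;
 *	if (memcmp(cur, expected, MCOUNT_INSN_SIZE) != 0)
 *		return -EINVAL;
 *	if (probe_kernel_write((void *)rec->ip, new_code, MCOUNT_INSN_SIZE))
 *		return -EPERM;
 *	return 0;
 */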

#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
static inline int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline int ftrace_text_reserved(void *start, void *end)
{
	return 0;
}

/*
 * Again, users of functions that take ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	return -ENODEV;
}
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
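
/*
 * Typical pairing (a sketch; the caller provides the synchronization
 * noted above):
 *
 *	int saved = __ftrace_enabled_save();
 *	...region that must run with ftrace_enabled cleared...
 *	__ftrace_enabled_restore(saved);
 */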

#ifndef HAVE_ARCH_CALLER_ADDR
# ifdef CONFIG_FRAME_POINTER
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
#  define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
#  define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
#  define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
#  define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
#  define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
# else
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 0UL
#  define CALLER_ADDR2 0UL
#  define CALLER_ADDR3 0UL
#  define CALLER_ADDR4 0UL
#  define CALLER_ADDR5 0UL
#  define CALLER_ADDR6 0UL
# endif
#endif /* ifndef HAVE_ARCH_CALLER_ADDR */

#ifdef CONFIG_IRQSOFF_TRACER
  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH		.ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
	unsigned long long subtime;
	unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

/*
 * We want to know which function is an entrypoint of a hardirq.
 * That will help us flag hardirq entry in the trace output.
 */
#define __irq_entry		 __attribute__((__section__(".irqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				trace_func_graph_ent_t entryfunc);
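
/*
 * A usage sketch (hypothetical handlers; a nonzero return from the
 * entry handler tells the core to record this function):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		...trace->rettime - trace->calltime approximates the
 *		   time spent in the function...
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 */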

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
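
/*
 * For example, a section of kernel code can keep itself out of the
 * graph trace (calls must be balanced):
 *
 *	pause_graph_tracing();
 *	...code that should not show up in the graph trace...
 *	unpause_graph_tracing();
 */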
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define __irq_entry
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION		.trace_recursion = 0,
#endif

#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */