/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

struct module;
struct ftrace_hash;

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

/*
 * FTRACE_OPS_FL_* bits denote the state of the ftrace_ops struct and are
 * set in the flags member.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * GLOBAL  - set manually by the ftrace_ops user to denote that the
 *           ftrace_ops is part of the global tracers sharing the same
 *           filter via the set_ftrace_* debugfs files.
 * DYNAMIC - set when ftrace_ops is registered to denote a dynamically
 *           allocated ftrace_ops which needs special care
 * CONTROL - set manually by the ftrace_ops user to denote that the
 *           ftrace_ops can be controlled by the following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 */
enum {
	FTRACE_OPS_FL_ENABLED		= 1 << 0,
	FTRACE_OPS_FL_GLOBAL		= 1 << 1,
	FTRACE_OPS_FL_DYNAMIC		= 1 << 2,
	FTRACE_OPS_FL_CONTROL		= 1 << 3,
};

struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops		*next;
	unsigned long			flags;
	int __percpu			*disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_hash		*notrace_hash;
	struct ftrace_hash		*filter_hash;
#endif
};

extern int function_trace_stop;

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/**
 * ftrace_stop - stop function tracer.
 *
 * A quick way to stop the function tracer. Note this is an on/off
 * switch; it is not recursive like preempt_disable.
 * This does not disable the calling of mcount, it only stops the
 * calling of functions from mcount.
 */
static inline void ftrace_stop(void)
{
	function_trace_stop = 1;
}

/**
 * ftrace_start - start the function tracer.
 *
 * This function is the inverse of ftrace_stop. It does not enable
 * function tracing if the function tracer is disabled. It only
 * clears the flag so that functions are called from mcount again.
 */
static inline void ftrace_start(void)
{
	function_trace_stop = 0;
}
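
/*
 * Illustrative sketch (not compiled): suspend-style code often brackets a
 * fragile region with these calls so the tracer does not run while state
 * is inconsistent.  do_fragile_work() is a hypothetical caller.
 */
#if 0
static void do_fragile_work(void)
{
	ftrace_stop();
	/* ... work that must not be traced from mcount ... */
	ftrace_start();
}
#endif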

/*
 * The ftrace_ops must be static and should also be marked
 * __read_mostly.  These functions do modify read-mostly variables,
 * so use them sparingly.  Never free an ftrace_ops or modify its
 * next pointer after it has been registered.  Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
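
/*
 * Illustrative sketch (not compiled): registering a minimal static
 * ftrace_ops from module code.  All my_* names are hypothetical.
 */
#if 0
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* Called on entry of every traced function; keep this path cheap. */
}

static struct ftrace_ops my_ops __read_mostly = {
	.func = my_trace_func,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
	/* Do not free or reuse my_ops; its next pointer may still be used. */
}
module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
#endif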

/**
 * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
 *
 * This function enables tracing on current cpu by decreasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	(*this_cpu_ptr(ops->disabled))--;
}

/**
 * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
 *
 * This function disables tracing on current cpu by increasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	(*this_cpu_ptr(ops->disabled))++;
}

/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns value of ftrace_ops::disabled on current cpu.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
	return *this_cpu_ptr(ops->disabled);
}
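
/*
 * Illustrative sketch (not compiled): toggling a controlled ftrace_ops on
 * the local cpu.  ctl_ops is assumed to have been registered with
 * FTRACE_OPS_FL_CONTROL set; preemption must be disabled around the calls.
 */
#if 0
static void toggle_local_tracing(struct ftrace_ops *ctl_ops, bool on)
{
	preempt_disable();
	if (on)
		ftrace_function_local_enable(ctl_ops);
	else
		ftrace_function_local_disable(ctl_ops);
	preempt_enable();
}
#endif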

extern void ftrace_stub(unsigned long a0, unsigned long a1);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
#endif

struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

void ftrace_bug(int err, unsigned long ip);

struct seq_file;

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					void **data);
	int			(*callback)(unsigned long ip, void **data);
	void			(*free)(void **data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);
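
/*
 * Illustrative sketch (not compiled): attaching a probe to every function
 * matching a glob.  The my_probe_* names and the "vfs_*" pattern are
 * hypothetical; on success the register call is expected to return the
 * number of functions enabled.
 */
#if 0
static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  void **data)
{
	/* Runs each time a matched function is hit. */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func = my_probe_func,
};

static int attach_vfs_probe(void)
{
	return register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
}
#endif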

extern int ftrace_text_reserved(void *start, void *end);

enum {
	FTRACE_FL_ENABLED	= (1 << 30),
};

#define FTRACE_FL_MASK		(0x3UL << 30)
#define FTRACE_REF_MAX		((1 << 30) - 1)

struct dyn_ftrace {
	union {
		unsigned long		ip; /* address of mcount call-site */
		struct dyn_ftrace	*freelist;
	};
	unsigned long		flags;
	struct dyn_arch_ftrace		arch;
};

int ftrace_force_update(void);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
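
/*
 * Illustrative sketch (not compiled): restricting a caller-supplied
 * ftrace_ops to scheduler functions before registering it.  The "sched_*"
 * glob is an arbitrary example.
 */
#if 0
static int register_sched_only(struct ftrace_ops *ops)
{
	static unsigned char glob[] = "sched_*";
	int ret;

	ret = ftrace_set_filter(ops, glob, sizeof(glob) - 1, 1);
	if (ret)
		return ret;
	return register_ftrace_function(ops);
}
#endif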

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};

enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_HASH	= (1 << 3),
	FTRACE_ITER_HASH	= (1 << 4),
	FTRACE_ITER_ENABLED	= (1 << 5),
};

void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
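
/*
 * Illustrative sketch (not compiled): walking every mcount record.  Real
 * users are arch code paths that already hold the required locking; the
 * pr_info() is just for demonstration.
 */
#if 0
static void dump_mcount_sites(void)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		pr_info("mcount site at %pS\n", (void *)rec->ip);
	}
}
#endif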


int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
int ftrace_location(unsigned long ip);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);
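
/*
 * Illustrative sketch (not compiled) of the read/compare/write discipline
 * described above, with the matching error codes.  This is arch-neutral
 * pseudo-support code: real ports use their own instruction sizes and
 * patching primitives, and "16" is just a stand-in for MCOUNT_INSN_SIZE.
 */
#if 0
static int patch_site(unsigned long ip, const void *expect,
		      const void *new_insn, size_t size)
{
	unsigned char cur[16];

	if (probe_kernel_read(cur, (void *)ip, size))
		return -EFAULT;	/* could not read the location */
	if (memcmp(cur, expect, size))
		return -EINVAL;	/* contents are not what we expected */
	if (probe_kernel_write((void *)ip, new_insn, size))
		return -EPERM;	/* could not write the location */
	return 0;
}
#endif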

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
static inline int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline int ftrace_text_reserved(void *start, void *end)
{
	return 0;
}

/*
 * Again, users of functions that take an ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	return -ENODEV;
}
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
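
/*
 * Illustrative sketch (not compiled): the intended save/restore pairing.
 * The caller is responsible for serializing against other writers of
 * ftrace_enabled.
 */
#if 0
static void run_without_tracing(void (*fn)(void))
{
	int saved = __ftrace_enabled_save();

	fn();	/* ftrace is disabled while fn() runs */
	__ftrace_enabled_restore(saved);
}
#endif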

#ifndef HAVE_ARCH_CALLER_ADDR
# ifdef CONFIG_FRAME_POINTER
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
#  define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
#  define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
#  define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
#  define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
#  define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
# else
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 0UL
#  define CALLER_ADDR2 0UL
#  define CALLER_ADDR3 0UL
#  define CALLER_ADDR4 0UL
#  define CALLER_ADDR5 0UL
#  define CALLER_ADDR6 0UL
# endif
#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
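
/*
 * Illustrative sketch (not compiled): the latency tracers use these to
 * record call sites, e.g. the immediate caller and its caller.
 */
#if 0
static void note_irqs_off(void)
{
	time_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
}
#endif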

#ifdef CONFIG_IRQSOFF_TRACER
  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
  static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
  static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH		.ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
	unsigned long long subtime;
	unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

/*
 * We want to know which functions are the entry points of hardirqs.
 * That helps us mark them in the tracer output.
 */
#define __irq_entry		 __attribute__((__section__(".irqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				trace_func_graph_ent_t entryfunc);
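
/*
 * Illustrative sketch (not compiled): a hypothetical entry/return pair for
 * the graph tracer.  Returning nonzero from the entry handler asks ftrace
 * to hook this function's return as well.
 */
#if 0
static int my_graph_entry(struct ftrace_graph_ent *ent)
{
	return 1;	/* trace the return of every entered function */
}

static void my_graph_return(struct ftrace_graph_ret *ret)
{
	/* ret->rettime - ret->calltime gives the (nested) duration. */
}

static int start_graph_tracing(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}
#endif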

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
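
/*
 * Illustrative sketch (not compiled): bracketing a section that must not
 * generate graph-trace entries for the current task.
 */
#if 0
static void quiet_section(void)
{
	pause_graph_tracing();
	/* work that would otherwise recurse into the graph tracer */
	unpause_graph_tracing();
}
#endif
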
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define __irq_entry
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION		.trace_recursion = 0,
#endif

#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */