/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name
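
/*
 * For illustration only (the event, its fields, and all values here are
 * hypothetical): an event defined as
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int cpu, const char *name),
 *		TP_ARGS(cpu, name),
 *		TP_STRUCT__entry(
 *			__field(	int,	cpu	)
 *			__string(	name,	name	)
 *		),
 *		TP_fast_assign(...),
 *		TP_printk(...))
 *
 * would come out of this stage as roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			cpu;
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_foo_bar;
 *
 * The same foo_bar event is reused in the sketches after the later stages.
 */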

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,			\
		assign, print, reg, unreg)				\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),		\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro creates each u32 <item>; it records
 * the offset of each dynamic array from the beginning of the event.
 * The size of the array is also encoded, in the upper 16 bits of <item>.
 */

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};
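
/*
 * Continuing the hypothetical foo_bar event from stage 1, this stage
 * would generate roughly:
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	name;
 *	};
 *
 * One u32 per __string()/__dynamic_array() field; the static __field()
 * and __array() members contribute nothing here.
 */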

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the output format of the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.item));
 *
 *	return ret;
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

#undef __array
#define __array(type, item, len)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t"	\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	       \
			       (unsigned int)offsetof(typeof(field),	       \
					__data_loc_##item),		       \
			       (unsigned int)sizeof(field.__data_loc_##item), \
			       (unsigned int)is_signed_type(type));	       \
	if (!ret)							       \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		      struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));		\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
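
/*
 * Sketch of the output for the hypothetical foo_bar event (exact offsets
 * and sizes depend on the architecture and on struct trace_entry): after
 * the common fields, ftrace_format_foo_bar() would emit something like
 *
 *	field:int cpu;	offset:12;	size:4;	signed:1;
 *	field:__data_loc char[] name;	offset:16;	size:4;	signed:1;
 *
 *	print fmt: ...
 *
 * This is what userspace parsers read from the event's "format" file.
 */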

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})
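
/*
 * A hypothetical TP_printk() using these helpers (names and values are
 * made up for illustration):
 *
 *	TP_printk("cpu=%d flags=%s state=%s", __entry->cpu,
 *		__print_flags(__entry->flags, "|",
 *			{ 1, "LOW" }, { 2, "HIGH" }),
 *		__print_symbolic(__entry->state,
 *			{ 0, "IDLE" }, { 1, "RUNNING" }))
 *
 * The tables become static trace_print_flags arrays, rendered through
 * ftrace_print_flags_seq()/ftrace_print_symbols_seq() into the per-cpu
 * trace_seq 'p' that ftrace_raw_output_<call>() sets up below.
 */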

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static enum print_line_t						\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0, FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item), 0,	       \
				 FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	ret = trace_define_common_fields(event_call);			\
	if (ret)							\
		return ret;						\
									\
	tstruct;							\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
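
/*
 * The fields registered above are what the event filter code matches
 * against. A hypothetical usage sketch for the foo_bar event:
 *
 *	# echo 'cpu > 1' > \
 *		/sys/kernel/debug/tracing/events/<system>/foo_bar/filter
 *
 * after which filter_current_check_discard() (used in stage 4 below)
 * drops any event that does not match.
 */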

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)       \
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}
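
/*
 * Worked example with the hypothetical foo_bar event: suppose
 * offsetof(struct ftrace_raw_foo_bar, __data) is 20 and the name being
 * traced is "tick" (5 bytes including the NUL). Then __string() above
 * computes
 *
 *	__data_offsets->name = 20 | (5 << 16);
 *
 * low 16 bits: offset of the string from the start of the entry;
 * high 16 bits: its length. The function returns __data_size = 5.
 */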

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(struct ftrace_event_call *unused)
 * {
 * 	return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(struct ftrace_event_call *unused)
 * {
 * 	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static void ftrace_profile_##call(proto);				\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *unused)\
{									\
	return register_trace_##call(ftrace_profile_##call);		\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##call(ftrace_profile_##call);			\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *			__array macros.
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */
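
/*
 * Usage sketch (not generated by this file): once the event is in the
 * _ftrace_events section and registered, it can be toggled from
 * userspace through debugfs, e.g.
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/<system>/<call>/enable
 *	# cat /sys/kernel/debug/tracing/trace
 *
 * Enabling ends up calling the regfunc defined below, which registers
 * ftrace_raw_event_<call> with the tracepoint.
 */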

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry
#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
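
/*
 * Sketch for the hypothetical foo_bar event: a TP_STRUCT__entry with
 * __string(name, name) pairs with a TP_fast_assign doing
 * __assign_str(name, name); the string is copied into the __data area
 * at the offset that ftrace_get_offsets_foo_bar() recorded above.
 */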

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_##call.id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,			\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non-NMI buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *	// Avoid recursion from perf that could mess up the buffer
 *	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 *	raw_data = trace_buf->buf;
 *
 *	// Make the recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *	barrier();
 *
 *	// Zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- store the offsets of the dynamic arrays
 *
 *	<assign>  <- assign our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		     __entry_size);  <- submit them to the perf counter
 * }
 */
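
/*
 * Worked example of the size computation (numbers are hypothetical):
 * with sizeof(*entry) == 24 and __data_size == 5,
 *
 *	__entry_size = ALIGN(5 + 24 + sizeof(u32), sizeof(u64)) - sizeof(u32)
 *		     = ALIGN(33, 8) - 4 = 40 - 4 = 36
 *
 * so the entry plus the u32 size header that perf prepends stays
 * u64-aligned, and the dead pad bytes get zeroed before submission.
 */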

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	extern int perf_swevent_get_recursion_context(void);		\
	extern void perf_swevent_put_recursion_context(int rctx);	\
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tp_event(int, u64, u64, void *, int);		\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct trace_entry *ent;					\
	int __entry_size;						\
	int __data_size;						\
	char *trace_buf;						\
	char *raw_data;							\
	int __cpu;							\
	int rctx;							\
	int pc;								\
									\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	local_irq_save(irq_flags);					\
									\
	rctx = perf_swevent_get_recursion_context();			\
	if (rctx < 0)							\
		goto end_recursion;					\
									\
	__cpu = smp_processor_id();					\
									\
	if (in_nmi())							\
		trace_buf = rcu_dereference(perf_trace_buf_nmi);	\
	else								\
		trace_buf = rcu_dereference(perf_trace_buf);		\
									\
	if (!trace_buf)							\
		goto end;						\
									\
	raw_data = per_cpu_ptr(trace_buf, __cpu);			\
									\
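	/* zero the dead bytes from alignment, to not leak stack to user */ \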
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;		\
	entry = (struct ftrace_raw_##call *)raw_data;			\
	ent = &entry->ent;						\
	tracing_generic_entry_update(ent, irq_flags, pc);		\
	ent->type = event_call->id;					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_tp_event(event_call->id, __addr, __count, entry,		\
			     __entry_size);				\
									\
end:									\
	perf_swevent_put_recursion_context(rctx);			\
end_recursion:								\
	local_irq_restore(irq_flags);					\
									\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT