/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
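
/*
 * As an illustrative sketch (the event name and fields below are
 * hypothetical, not from this file), a definition such as:
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int bar),
 *		TP_ARGS(bar),
 *		TP_STRUCT__entry(
 *			__field(	int,	bar		)
 *			__array(	char,	name,	16	)
 *		),
 *		TP_fast_assign(...),
 *		TP_printk(...))
 *
 * expands, at this stage, to roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			bar;
 *		char			name[16];
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_foo_bar;
 */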

#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, TP_PROTO(proto), TP_ARGS(args),	\
		TP_STRUCT__entry(tstruct),			\
		TP_fast_assign(assign),				\
		TP_printk(print))
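
/*
 * TRACE_INCLUDE() below re-includes the trace header that defined the
 * events.  Each stage of this file redefines the field macros and then
 * re-includes that header, so a single TRACE_EVENT() definition is
 * expanded several times, once per stage.
 */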

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro creates one u32 <item> per dynamic array;
 * it records the offset of that array from the beginning of the event.
 * The size of the array is also encoded, in the upper 16 bits of <item>.
 */
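
/*
 * For example (hypothetical values): a dynamic array that starts 8 bytes
 * into the event and is 12 bytes long is recorded as
 *
 *	<item> = 8 | (12 << 16);	(= 0x000c0008)
 *
 * The low 16 bits hold the offset, the high 16 bits the size (see
 * ftrace_get_offsets_<call> further down).
 */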

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the format used to show the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.item));
 *
 * }
 */
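
/*
 * The output of ftrace_format_##call() ends up in the event's "format"
 * file.  For a hypothetical __field(int, bar) the emitted line looks
 * like:
 *
 *	field:int bar;	offset:12;	size:4;
 *
 * where the offset and size depend on the preceding fields and the
 * architecture.
 */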

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

#undef __array
#define __array(type, item, len)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\n",		       \
			       (unsigned int)offsetof(typeof(field),	       \
					__data_loc_##item),		       \
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret)							       \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		      struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));		\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */
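
/*
 * For a hypothetical event "foo_bar" with TP_printk("bar=%d", __entry->bar),
 * the rendered line in the "trace" file would read, e.g.:
 *
 *	foo_bar: bar=42
 *
 * (the "<call>: " prefix comes from the trace_seq_printf() call in the
 * generated function below).
 */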

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags flags[] =		\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, flags);		\
	})
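
/*
 * Usage sketch (the flag names are made up): inside TP_printk() one can
 * write
 *
 *	__print_flags(__entry->flags, "|", { 1, "READ" }, { 2, "SYNC" })
 *
 * which hands the table to ftrace_print_flags_seq() and renders, e.g.,
 * "READ|SYNC" for the value 3.
 */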

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})
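
/*
 * Similarly (hypothetical values), __print_symbolic(__entry->state,
 * { 0, "RUNNING" }, { 1, "SLEEPING" }) prints only the one matching
 * name, e.g. "SLEEPING" for the value 1.
 */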

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
enum print_line_t							\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}
	
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0, FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item), 0,	       \
				 FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
int									\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	ret = trace_define_common_fields(event_call);			\
	if (ret)							\
		return ret;						\
									\
	tstruct;							\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)       \
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}
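
/*
 * Worked example (hypothetical event with a single __string(msg, msg)
 * field, called with msg = "hello"): __string() expands to
 * __dynamic_array(char, msg, strlen("hello") + 1), so the macro above
 * stores
 *
 *	__data_offsets->msg  = 0 + offsetof(typeof(*entry), __data);
 *	__data_offsets->msg |= (6 * sizeof(char)) << 16;
 *
 * and ftrace_get_offsets_<call>() returns __data_size = 6, which stage 4
 * adds to the ring buffer reservation.
 */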

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_counter support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
 * {
 * 	int ret = 0;
 *
 * 	if (!atomic_inc_return(&event_call->profile_count))
 * 		ret = register_trace_<call>(ftrace_profile_<call>);
 *
 * 	return ret;
 * }
 *
 * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
 * {
 * 	if (atomic_add_negative(-1, &event_call->profile_count))
 * 		unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */
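
/*
 * Note: profile_count is initialized to ATOMIC_INIT(-1) (see
 * _TRACE_PROFILE_INIT further down), so atomic_inc_return() returns 0
 * only on the first enable and the probe is registered exactly once;
 * the atomic_add_negative() test mirrors that on the last disable.
 */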

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static void ftrace_profile_##call(proto);				\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
{									\
	int ret = 0;							\
									\
	if (!atomic_inc_return(&event_call->profile_count))		\
		ret = register_trace_##call(ftrace_profile_##call);	\
									\
	return ret;							\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
{									\
	if (atomic_add_negative(-1, &event_call->profile_count))	\
		unregister_trace_##call(ftrace_profile_##call);		\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *			__array macros.
 *
 *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */
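
/*
 * Note that event_<call> is placed in the "_ftrace_events" section, so
 * the trace core can walk that section at boot and register every event
 * without a per-event registration call.
 */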

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(event_##call.id,	\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(event_call, entry, event))	\
		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(void *ptr)			\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(void *ptr)			\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	init_preds(&event_##call);					\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,			\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call>, except that we don't
 * insert into the ring buffer but into a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	int __entry_size;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	do {
 *		char raw_data[__entry_size]; <- allocate our sample in the stack
 *		struct trace_entry *ent;
 *
 *		zero dead bytes from alignment to avoid stack leak to userspace:
 *
 *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *		entry = (struct ftrace_raw_<call> *)raw_data;
 *		ent = &entry->ent;
 *		tracing_generic_entry_update(ent, irq_flags, pc);
 *		ent->type = event_call->id;
 *
 *		<tstruct> <- set up the dynamic arrays
 *
 *		<assign>  <- assign our values
 *
 *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
 *			     __entry_size);  <- submit them to perf counter
 *	} while (0);
 *
 * }
 */
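
/*
 * The alignment arithmetic, worked through with hypothetical numbers:
 * with sizeof(*entry) == 28 and __data_size == 7,
 *
 *	ALIGN(7 + 28 + 4, 8)	= ALIGN(39, 8)	= 40
 *	__entry_size		= 40 - 4	= 36
 *
 * so the sample plus the u32 size word that perf prepends fills a whole
 * number of u64s; the trailing pad bytes are zeroed explicitly to avoid
 * leaking stack data.
 */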

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	int __entry_size;						\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	do {								\
		char raw_data[__entry_size];				\
		struct trace_entry *ent;				\
									\
		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
		entry = (struct ftrace_raw_##call *)raw_data;		\
		ent = &entry->ent;					\
		tracing_generic_entry_update(ent, irq_flags, pc);	\
		ent->type = event_call->id;				\
									\
		tstruct							\
									\
		{ assign; }						\
									\
		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
			     __entry_size);				\
	} while (0);							\
									\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT