/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item)		type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
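
/*
 * As an illustration only (this event is made up for this comment, it does
 * not exist in the kernel), a definition such as:
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int cpu, const char *name),
 *		TP_ARGS(cpu, name),
 *		TP_STRUCT__entry(
 *			__field(	int,	cpu	)
 *			__string(	name,	name	)
 *		),
 *		TP_fast_assign(...),
 *		TP_printk(...));
 *
 * would be expanded by the macros above into roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			cpu;
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_foo_bar;
 */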

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro creates each u32 <item>, which holds the
 * offset of the corresponding array from the beginning of the event.
 * The size of the array is also encoded, in the higher 16 bits of <item>.
 */

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
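
/*
 * Continuing the made-up foo_bar example from stage 1, the macros above
 * would produce roughly:
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	name;
 *	};
 *
 * At trace time the low 16 bits of 'name' hold the offset of the string
 * from the beginning of the event and the high 16 bits hold its size in
 * bytes, as computed by ftrace_get_offsets_<call>() further below.
 */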

/*
 * Set up the format used to show the trace point.
 *
 * int
 * ftrace_format_##call(struct ftrace_event_call *unused,
 *		     struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.item));
 *
 *	return ret;
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __array
#define __array(type, item, len)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\n",		       \
			       (unsigned int)offsetof(typeof(field),	       \
					__data_loc_##item),		       \
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret)							       \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		      struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));		\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
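
/*
 * For the made-up foo_bar example, reading the event's format file
 * (typically tracing/events/<system>/foo_bar/format in debugfs) would show
 * the function above emitting lines along these lines, with the offsets
 * depending on the actual layout of struct ftrace_raw_foo_bar:
 *
 *	field:int cpu;	offset:12;	size:4;
 *	field:__data_loc char[] name;	offset:16;	size:4;
 *
 * followed by the "print fmt:" line printed at the end of the function.
 */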

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)
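
/*
 * Note that only the low 16 bits of __data_loc_<field> (the offset) are
 * used here; the size stored in the high 16 bits could be recovered with
 * (__entry->__data_loc_##field >> 16) should a print format need it.
 */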

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags flags[] =		\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, flags);		\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})
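
/*
 * A TP_printk() format can then use these helpers; a made-up example of
 * such a use would be:
 *
 *	TP_printk("state=%s", __print_symbolic(__entry->state,
 *			{ 0, "RUNNING" }, { 1, "SLEEPING" }))
 *
 * which resolves the numeric value through the static trace_print_flags
 * table built above when the event is printed.
 */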

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
enum print_line_t							\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}
	
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field
#define __field(type, item)						\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), is_signed_type(type));	\
	if (ret)							\
		return ret;

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0);		\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
				offsetof(typeof(field), __data_loc_##item),    \
				 sizeof(field.__data_loc_##item), 0);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
int									\
ftrace_define_fields_##call(void)					\
{									\
	struct ftrace_raw_##call field;					\
	struct ftrace_event_call *event_call = &event_##call;		\
	int ret;							\
									\
	__common_field(int, type, 1);					\
	__common_field(unsigned char, flags, 0);			\
	__common_field(unsigned char, preempt_count, 0);		\
	__common_field(int, pid, 1);					\
	__common_field(int, tgid, 1);					\
									\
	tstruct;							\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
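
/*
 * For the made-up foo_bar example, ftrace_define_fields_foo_bar() would,
 * beyond the common fields registered above, boil down to roughly:
 *
 *	ret = trace_define_field(event_call, "int", "cpu",
 *				 offsetof(typeof(field), cpu),
 *				 sizeof(field.cpu), 1);
 *	ret = trace_define_field(event_call, "__data_loc char[]", "name",
 *				 offsetof(typeof(field), __data_loc_name),
 *				 sizeof(field.__data_loc_name), 0);
 *
 * which registers the per event fields used, among other things, by the
 * event filters.
 */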

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)       \
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
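
/*
 * For the made-up foo_bar example the helper above would expand to
 * approximately (sizeof(char) folded away):
 *
 *	static inline int ftrace_get_offsets_foo_bar(
 *		struct ftrace_data_offsets_foo_bar *__data_offsets,
 *		int cpu, const char *name)
 *	{
 *		int __data_size = 0;
 *		struct ftrace_raw_foo_bar __maybe_unused *entry;
 *
 *		__data_offsets->name = __data_size +
 *				       offsetof(typeof(*entry), __data);
 *		__data_offsets->name |= (strlen(name) + 1) << 16;
 *		__data_size += strlen(name) + 1;
 *
 *		return __data_size;
 *	}
 */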

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_counter support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
 * {
 * 	int ret = 0;
 *
 * 	if (!atomic_inc_return(&event_call->profile_count))
 * 		ret = register_trace_<call>(ftrace_profile_<call>);
 *
 * 	return ret;
 * }
 *
 * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
 * {
 * 	if (atomic_add_negative(-1, &event_call->profile_count))
 * 		unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static void ftrace_profile_##call(proto);				\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
{									\
	int ret = 0;							\
									\
	if (!atomic_inc_return(&event_call->profile_count))		\
		ret = register_trace_##call(ftrace_profile_##call);	\
									\
	return ret;							\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
{									\
	if (atomic_add_negative(-1, &event_call->profile_count))	\
		unregister_trace_##call(ftrace_profile_##call);		\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *			__array macros.
 *
 *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(event_##call.id,	\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(event_call, entry, event))	\
		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(void *ptr)			\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(void *ptr)			\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	init_preds(&event_##call);					\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,			\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	int __entry_size;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	do {
 *		char raw_data[__entry_size]; <- allocate our sample in the stack
 *		struct trace_entry *ent;
 *
 *		zero dead bytes from alignment to avoid stack leak to userspace:
 *
 *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *		entry = (struct ftrace_raw_<call> *)raw_data;
 *		ent = &entry->ent;
 *		tracing_generic_entry_update(ent, irq_flags, pc);
 *		ent->type = event_call->id;
 *
 *		<tstruct> <- do some jobs with dynamic arrays
 *
 *		<assign>  <- affect our values
 *
 *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
 *			     __entry_size);  <- submit them to perf counter
 *	} while (0);
 *
 * }
 */
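
/*
 * A worked example of the size computation described above, with purely
 * illustrative numbers: if sizeof(*entry) == 16 and __data_size == 5, then
 *
 *	__entry_size = ALIGN(5 + 16 + sizeof(u32), sizeof(u64)) - sizeof(u32)
 *		     = ALIGN(25, 8) - 4
 *		     = 32 - 4
 *		     = 28
 *
 * so that the sample plus the u32 size word added later is a multiple of
 * u64 in size.
 */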

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	int __entry_size;						\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	do {								\
		char raw_data[__entry_size];				\
		struct trace_entry *ent;				\
									\
		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
		entry = (struct ftrace_raw_##call *)raw_data;		\
		ent = &entry->ent;					\
		tracing_generic_entry_update(ent, irq_flags, pc);	\
		ent->type = event_call->id;				\
									\
		tstruct							\
									\
		{ assign; }						\
									\
		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
			     __entry_size);				\
	} while (0);							\
									\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT