/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
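
/*
 * Illustration only (none of the names below are defined by this file):
 * a hypothetical event such as
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int cpu, const char *name),
 *		TP_ARGS(cpu, name),
 *		TP_STRUCT__entry(
 *			__field(	int,	cpu	)
 *			__string(	name,	name	)
 *		),
 *		TP_fast_assign(
 *			__entry->cpu = cpu;
 *			__assign_str(name, name);
 *		),
 *		TP_printk("cpu=%d name=%s", __entry->cpu, __get_str(name))
 *	);
 *
 * would pass through this stage roughly as:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			cpu;
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_foo_bar;
 */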

#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,			\
		assign, print, reg, unreg)				\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),		\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print))	\

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */
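
/*
 * Continuing the hypothetical foo_bar example from stage 1: __field()
 * contributes nothing here, and the single __string() (a __dynamic_array()
 * underneath) contributes one u32, so this stage would generate roughly:
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	name;
 *	};
 */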

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the display format of the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.type));
 *
 * }
 */
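
/*
 * For the hypothetical foo_bar event, ftrace_format_foo_bar() would emit
 * text along these lines (the offsets are illustrative only; they depend
 * on the layout of struct trace_entry and on field alignment):
 *
 *	field:int cpu;	offset:12;	size:4;
 *	field:__data_loc char[] name;	offset:16;	size:4;
 *
 *	print fmt: "cpu=%d name=%s", REC->cpu, __get_str(name)
 */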

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

#undef __array
#define __array(type, item, len)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\n",		       \
			       (unsigned int)offsetof(typeof(field),	       \
					__data_loc_##item),		       \
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret)							       \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		      struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));		\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)
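
/*
 * Decoding sketch (hypothetical field name): the offsets stage further
 * down packs each dynamic array's location as
 *
 *	__data_loc_name = (length_in_bytes << 16)
 *			| (offset of the data from the start of the entry);
 *
 * so __get_dynamic_array() only needs the low 16 bits to find the data,
 * and the length can be recovered with (__data_loc_name >> 16) if needed.
 */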

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags flags[] =		\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, flags);		\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})
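
/*
 * Usage sketch (the flag values and names are made up): inside TP_printk()
 * one might write
 *
 *	__print_flags(__entry->state, "|",
 *		{ 1, "RUNNING" }, { 2, "QUEUED" })
 *
 * which, through the macro above, builds a static trace_print_flags table
 * and hands it to ftrace_print_flags_seq() along with the per-cpu
 * trace_seq 'p' set up by ftrace_raw_output_<call> below.
 */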

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static enum print_line_t						\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}
	
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0, FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item), 0,	       \
				 FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	ret = trace_define_common_fields(event_call);			\
	if (ret)							\
		return ret;						\
									\
	tstruct;							\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)       \

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)       \
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}
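
/*
 * For the hypothetical foo_bar event this stage would boil down to
 * roughly (illustrative only):
 *
 *	static inline int ftrace_get_offsets_foo_bar(
 *		struct ftrace_data_offsets_foo_bar *__data_offsets,
 *		int cpu, const char *name)
 *	{
 *		int __data_size = 0;
 *		struct ftrace_raw_foo_bar __maybe_unused *entry;
 *
 *		__data_offsets->name = __data_size +
 *				       offsetof(typeof(*entry), __data);
 *		__data_offsets->name |= ((strlen(name) + 1) * sizeof(char)) << 16;
 *		__data_size += (strlen(name) + 1) * sizeof(char);
 *
 *		return __data_size;
 *	}
 */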

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_counter support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
 * {
 * 	int ret = 0;
 *
 * 	if (!atomic_inc_return(&event_call->profile_count))
 * 		ret = register_trace_<call>(ftrace_profile_<call>);
 *
 * 	return ret;
 * }
 *
 * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
 * {
 * 	if (atomic_add_negative(-1, &event_call->profile_count))
 * 		unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static void ftrace_profile_##call(proto);				\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
{									\
	int ret = 0;							\
									\
	if (!atomic_inc_return(&event_call->profile_count))		\
		ret = register_trace_##call(ftrace_profile_##call);	\
									\
	return ret;							\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
{									\
	if (atomic_add_negative(-1, &event_call->profile_count))	\
		unregister_trace_##call(ftrace_profile_##call);		\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *			__array macros.
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */
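
/*
 * Usage reminder (the event name is the hypothetical foo_bar from the
 * earlier examples): a header defining events with TRACE_EVENT() is
 * included once normally to declare the tracepoints, and once more with
 * CREATE_TRACE_POINTS defined in a single .c file so that
 * <trace/define_trace.h> can pull in this header and run the stages
 * above.  The instrumented code then simply calls
 *
 *	trace_foo_bar(cpu, name);
 *
 * and the generated ftrace_raw_event_foo_bar() below runs as the
 * registered probe whenever the event is enabled.
 */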

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)       	\

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_##call.id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(void *ptr)			\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(void *ptr)			\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,			\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	int __entry_size;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	do {
 *		char raw_data[__entry_size]; <- allocate our sample in the stack
 *		struct trace_entry *ent;
 *
 *		zero dead bytes from alignment to avoid stack leak to userspace:
 *
 *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *		entry = (struct ftrace_raw_<call> *)raw_data;
 *		ent = &entry->ent;
 *		tracing_generic_entry_update(ent, irq_flags, pc);
 *		ent->type = event_call->id;
 *
 *		<tstruct> <- do some jobs with dynamic arrays
 *
 *		<assign>  <- affect our values
 *
 *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
 *			     __entry_size);  <- submit them to perf counter
 *	} while (0);
 *
 * }
 */

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	int __entry_size;						\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	do {								\
		char raw_data[__entry_size];				\
		struct trace_entry *ent;				\
									\
		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
		entry = (struct ftrace_raw_##call *)raw_data;		\
		ent = &entry->ent;					\
		tracing_generic_entry_update(ent, irq_flags, pc);	\
		ent->type = event_call->id;				\
									\
		tstruct							\
									\
		{ assign; }						\
									\
		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
			     __entry_size);				\
	} while (0);							\
									\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT