/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
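
/*
 * For illustration only (a hypothetical event, not taken from any real
 * subsystem): an event defined as
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int bar, const char *msg),
 *		TP_ARGS(bar, msg),
 *		TP_STRUCT__entry(
 *			__field(int, bar)
 *			__string(msg, msg)
 *		),
 *		TP_fast_assign(
 *			__entry->bar = bar;
 *			__assign_str(msg, msg);
 *		),
 *		TP_printk("bar=%d msg=%s", __entry->bar, __get_str(msg)))
 *
 * would roughly produce, in this stage:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			bar;
 *		u32			__data_loc_msg;
 *		char			__data[0];
 *	};
 */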

#include <linux/ftrace_event.h>

/*
 * TRACE_EVENT_TEMPLATE can be used to add a generic handler for
 * events that all share the same parameters and just have distinct
 * trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the TRACE_EVENT_TEMPLATE to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	TRACE_EVENT_TEMPLATE(name,			       \
			     PARAMS(proto),		       \
			     PARAMS(args),		       \
			     PARAMS(tstruct),		       \
			     PARAMS(assign),		       \
			     PARAMS(print));		       \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
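
/*
 * Hypothetical usage sketch (the names below are made up): two tracepoints
 * that share a prototype could be declared as
 *
 *	TRACE_EVENT_TEMPLATE(foo_template,
 *		TP_PROTO(int bar),
 *		TP_ARGS(bar),
 *		TP_STRUCT__entry(__field(int, bar)),
 *		TP_fast_assign(__entry->bar = bar;),
 *		TP_printk("bar=%d", __entry->bar));
 *
 *	DEFINE_EVENT(foo_template, foo_start, TP_PROTO(int bar), TP_ARGS(bar));
 *	DEFINE_EVENT(foo_template, foo_end, TP_PROTO(int bar), TP_ARGS(bar));
 *
 * so that foo_start and foo_end share all of the generated code except for
 * their registration with their respective tracepoints.
 */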


#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call event_##name

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,			\
		assign, print, reg, unreg)				\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),		\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print))	\

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>, this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */
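
/*
 * As an illustration (same hypothetical foo_bar event as in stage 1): the
 * __string(msg, msg) field would produce
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	msg;
 *	};
 *
 * where, at trace time, the low 16 bits of msg hold the offset of the
 * string from the beginning of the entry and the high 16 bits hold its
 * length.
 */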

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the format used to show the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.item));
 *
 * }
 */
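
/*
 * For a hypothetical "int bar" field this emits a line of the form
 *
 *	field:int bar;	offset:12;	size:4;	signed:1;
 *
 * (the offset and size shown here are only illustrative; the real values
 * depend on struct trace_entry and on the fields that precede it).
 */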

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

#undef __array
#define __array(type, item, len)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t"	\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	       \
			       (unsigned int)offsetof(typeof(field),	       \
					__data_loc_##item),		       \
			       (unsigned int)sizeof(field.__data_loc_##item), \
			       (unsigned int)is_signed_type(type));	       \
	if (!ret)							       \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print)	\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		      struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */
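
/*
 * For instance, with the hypothetical foo_bar event used in the earlier
 * examples, the event portion of a trace line would look roughly like
 *
 *	foo_bar: bar=1 msg=hello
 *
 * i.e. the event name followed by whatever TP_printk() formats.
 */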

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
static enum print_line_t						\
ftrace_raw_output_id_##call(int event_id, const char *name,		\
			    struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_id) {					\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
static enum print_line_t						\
ftrace_raw_output_##name(struct trace_iterator *iter, int flags)	\
{									\
	return ftrace_raw_output_id_##template(event_##name.id,		\
					       #name, iter, flags);	\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0, FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item), 0,	       \
				 FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print)	\
static int								\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	ret = trace_define_common_fields(event_call);			\
	if (ret)							\
		return ret;						\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */
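
/*
 * Sketch of the arithmetic below, using the __string(msg, msg) field from
 * the earlier hypothetical event and assuming msg points to "hello" and
 * __data ends up at offset 20 of the entry (both values are illustrative):
 *
 *	__data_offsets->msg = 0 + 20;			// offset 20
 *	__data_offsets->msg |= (6 * sizeof(char)) << 16;	// length 6 ("hello" + '\0')
 *	__data_size += 6;
 *
 * so the field records both where the string will live and how long it is.
 */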

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)       \
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 * 	return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 * 	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
									\
static void ftrace_profile_##name(proto);				\
									\
static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\
{									\
	return register_trace_##name(ftrace_profile_##name);		\
}									\
									\
static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##name(ftrace_profile_##name);			\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif /* CONFIG_EVENT_PROFILE */

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *			__array macros.
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)       	\

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
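
/*
 * (Hypothetical usage, matching the foo_bar example above: inside
 * TP_fast_assign() one would write "__assign_str(msg, msg);" to copy the
 * string into the space reserved by __string().)
 */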

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
									\
static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
				       proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	ftrace_raw_event_id_##template(&event_##call, args);		\
}									\
									\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##template,		\
	.define_fields		= ftrace_define_fields_##template,	\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	char *raw_data;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non nmi buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 * 	// Avoid recursion from perf that could mess up the buffer
 * 	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 * 	raw_data = trace_buf->buf;
 *
 *	// Make recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *
 *	barrier();
 *
 *	//zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- do some jobs with dynamic arrays
 *
 *	<assign>  <- assign our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		     __entry_size);  <- submit them to perf counter
 *
 * }
 */

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
static void								\
ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
			    proto)					\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	extern int perf_swevent_get_recursion_context(void);		\
	extern void perf_swevent_put_recursion_context(int rctx);	\
	extern void perf_tp_event(int, u64, u64, void *, int);		\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct trace_entry *ent;					\
	int __entry_size;						\
	int __data_size;						\
	char *trace_buf;						\
	char *raw_data;							\
	int __cpu;							\
	int rctx;							\
	int pc;								\
									\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	local_irq_save(irq_flags);					\
									\
	rctx = perf_swevent_get_recursion_context();			\
	if (rctx < 0)							\
		goto end_recursion;					\
									\
	__cpu = smp_processor_id();					\
									\
	if (in_nmi())							\
		trace_buf = rcu_dereference(perf_trace_buf_nmi);	\
	else								\
		trace_buf = rcu_dereference(perf_trace_buf);		\
									\
	if (!trace_buf)							\
		goto end;						\
									\
	raw_data = per_cpu_ptr(trace_buf, __cpu);			\
									\
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;		\
	entry = (struct ftrace_raw_##call *)raw_data;			\
	ent = &entry->ent;						\
	tracing_generic_entry_update(ent, irq_flags, pc);		\
	ent->type = event_call->id;					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_tp_event(event_call->id, __addr, __count, entry,		\
			     __entry_size);				\
									\
end:									\
	perf_swevent_put_recursion_context(rctx);			\
end_recursion:								\
	local_irq_restore(irq_flags);					\
									\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)		\
static void ftrace_profile_##call(proto)			\
{								\
	struct ftrace_event_call *event_call = &event_##call;	\
								\
	ftrace_profile_templ_##template(event_call, args);	\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT