/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

/*
 * TRACE_EVENT_TEMPLATE can be used to add a generic function
 * handlers for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the TRACE_EVENT_TEMPLATE to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 * (The template it creates is named after the event itself.)
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	TRACE_EVENT_TEMPLATE(name,			       \
			     PARAMS(proto),		       \
			     PARAMS(args),		       \
			     PARAMS(tstruct),		       \
			     PARAMS(assign),		       \
			     PARAMS(print));		       \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


/* Stage 1: each field macro expands to a member declaration in the
 * generated struct ftrace_raw_<call>. */
#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

/* Dynamic arrays only store a u32 offset/length pair in the fixed part. */
#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

/* Stage 1: emit the raw event structure; __data[0] anchors the variable
 * sized tail used by dynamic arrays. */
#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};

/* Forward declare the per-event ftrace_event_call defined later. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Pass multi-token macro arguments through untouched. */
#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,			\
		assign, print, reg, unreg)				\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),		\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print))	\

84
85
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>, this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */

/* Stage 2: only dynamic arrays contribute members (a u32 offset each);
 * fixed-size fields expand to nothing here. */
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

/* Stage 2: emit the per-event offsets structure. */
#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Setup the showing format of trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.type));
 *
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

/* Print one fixed field's description into the event's format file. */
#undef __field
#define __field(type, item)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

/* The filter type only matters for field definition, not formatting. */
#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

/* Print a fixed-size array field's description. */
#undef __array
#define __array(type, item, len)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t"	\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

/* Print a dynamic array's description; its stored member is the
 * u32 __data_loc_<item> offset/length word, not the data itself. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	       \
			       (unsigned int)offsetof(typeof(field),	       \
					__data_loc_##item),		       \
			       (unsigned int)sizeof(field.__data_loc_##item), \
			       (unsigned int)is_signed_type(type));	       \
	if (!ret)							       \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

/* In the print fmt string, entry fields are spelled REC->field. */
#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

/* Show the TP_printk format and its arguments as literal text. */
#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

/* perf-only assignments are irrelevant to the format file. */
#undef TP_perf_assign
#define TP_perf_assign(args...)

/* Emit the field list (setup helper) then the print fmt for each template;
 * the setup helper is shared by DEFINE_EVENT_PRINT below. */
#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print)	\
static int								\
ftrace_format_setup_##call(struct ftrace_event_call *unused,		\
			   struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));		\
	int ret = 0;							\
									\
	tstruct;							\
									\
	return ret;							\
}									\
									\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		     struct trace_seq *s)				\
{									\
	int ret = 0;							\
									\
	ret = ftrace_format_setup_##call(unused, s);			\
	if (!ret)							\
		return ret;						\
									\
	ret = trace_seq_printf(s, "\nprint fmt: " print);		\
									\
	return ret;							\
}

/* Plain DEFINE_EVENT reuses the template's format function. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

/* DEFINE_EVENT_PRINT overrides the print fmt but shares the template's
 * field setup helper. */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
static int								\
ftrace_format_##name(struct ftrace_event_call *unused,			\
		      struct trace_seq *s)				\
{									\
	int ret = 0;							\
									\
	ret = ftrace_format_setup_##template(unused, s);		\
	if (!ret)							\
		return ret;						\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

/* Stage 3: __entry now refers to the decoded raw record. */
#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

/* The low 16 bits of __data_loc_<field> hold the payload offset. */
#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

/* Render a flags bitmask symbolically via the per-cpu seq buffer p. */
#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

/* Map a scalar value to its symbolic name via the per-cpu seq buffer p. */
#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

/* Stage 3: shared output routine, parameterized on the event id/name so
 * every DEFINE_EVENT of one template can reuse it. */
#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
static enum print_line_t						\
ftrace_raw_output_id_##call(int event_id, const char *name,		\
			    struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_id) {					\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

/* Each event's output function just forwards to the template's. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
static enum print_line_t						\
ftrace_raw_output_##name(struct trace_iterator *iter, int flags)	\
{									\
	return ftrace_raw_output_id_##template(event_##name.id,	\
					       #name, iter, flags);	\
}


/* DEFINE_EVENT_PRINT carries its own print fmt, so it open-codes the
 * output routine against the template's raw structure. */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static enum print_line_t						\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

398
399
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/* Register one fixed field with the event filter/definition core. */
#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

/* Register a fixed-size array field; arrays larger than the filter
 * string buffer cannot be filtered on. */
#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0, FILTER_OTHER);	\
	if (ret)							\
		return ret;

/* Register the __data_loc word that describes a dynamic array. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item), 0,	       \
				 FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

/* Build the field-definition function: common fields first, then the
 * per-event fields expanded from tstruct. */
#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print)	\
static int								\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	ret = trace_define_common_fields(event_call);			\
	if (ret)							\
		return ret;						\
									\
	tstruct;							\
									\
	return ret;							\
}

/* Events share the template's define_fields function. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

455
456
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

/* Record offset (low 16 bits) and byte length (high 16 bits) of each
 * dynamic array, then grow the running payload size. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

/* Compute the total dynamic-payload size and fill the offsets struct. */
#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)       \
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

/* Events share the template's offsets helper. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

503
504
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 * 	return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 * 	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)

/* Per-event profile enable/disable hooks: (un)register the probe that
 * feeds perf (ftrace_profile_<name>, defined later). */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
									\
static void ftrace_profile_##name(proto);				\
									\
static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\
{									\
	return register_trace_##name(ftrace_profile_##name);		\
}									\
									\
static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##name(ftrace_profile_##name);			\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

546
547
548
549
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (!ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to  <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *			__array macros.
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (!ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 2
 * };
 *
 * static int ftrace_raw_init_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

/* Optional perf-profiling members of struct ftrace_event_call; empty
 * when CONFIG_EVENT_PROFILE is off. */
#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

/* Stage 4: inside TP_fast_assign, __entry is the reserved record. */
#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

/* Copy the precomputed offset/length word into the record. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)       	\

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

/* Stage 4: shared ring-buffer insertion routine for all events of one
 * template; reserves fixed struct + dynamic payload, assigns, commits. */
#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
									\
static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
				       proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}

/* Per-event probe, register/unregister hooks, output type, and id
 * registration, all delegating heavy lifting to the template. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	ftrace_raw_event_id_##template(&event_##call, args);		\
}									\
									\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	return 0;							\
}

/* DEFINE_EVENT_PRINT reuses all of DEFINE_EVENT's raw-event plumbing. */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)

/* Emit the ftrace_event_call descriptor into the _ftrace_events section;
 * format and field definitions come from the template. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##template,		\
	.define_fields		= ftrace_define_fields_##template,	\
	_TRACE_PROFILE_INIT(call)					\
}

/* Same descriptor, but the event has its own format function (it carries
 * its own print fmt) while field definitions stay with the template. */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,			\
	.define_fields		= ftrace_define_fields_##template,	\
	_TRACE_PROFILE_INIT(call)					\
}


#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	int __cpu
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non nmi buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 * 	// Avoid recursion from perf that could mess up the buffer
 * 	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 * 	raw_data = trace_buf->buf;
 *
 *	// Make recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *
 *	barrier();
 *
 *	//zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- do some jobs with dynamic arrays
 *
 *	<assign>  <- affect our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		     __entry_size);  <- submit them to perf counter
 *
 * }
 */

#ifdef CONFIG_EVENT_PROFILE

/* Capture optional __perf_addr()/__perf_count() annotations used inside
 * TP_perf_assign into the locals of the profile probe below. */
#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

/* Shared perf insertion routine: build the raw record in the per-cpu
 * perf trace buffer (with recursion protection) and hand it to perf. */
#undef TRACE_EVENT_TEMPLATE
#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)	\
static void								\
ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
			    proto)					\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	extern int perf_swevent_get_recursion_context(void);		\
	extern void perf_swevent_put_recursion_context(int rctx);	\
	extern void perf_tp_event(int, u64, u64, void *, int);		\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct trace_entry *ent;					\
	int __entry_size;						\
	int __data_size;						\
	char *trace_buf;						\
	char *raw_data;							\
	int __cpu;							\
	int rctx;							\
	int pc;								\
									\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	local_irq_save(irq_flags);					\
									\
	rctx = perf_swevent_get_recursion_context();			\
	if (rctx < 0)							\
		goto end_recursion;					\
									\
	__cpu = smp_processor_id();					\
									\
	if (in_nmi())							\
		trace_buf = rcu_dereference(perf_trace_buf_nmi);	\
	else								\
		trace_buf = rcu_dereference(perf_trace_buf);		\
									\
	if (!trace_buf)							\
		goto end;						\
									\
	raw_data = per_cpu_ptr(trace_buf, __cpu);			\
									\
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;		\
	entry = (struct ftrace_raw_##call *)raw_data;			\
	ent = &entry->ent;						\
	tracing_generic_entry_update(ent, irq_flags, pc);		\
	ent->type = event_call->id;					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_tp_event(event_call->id, __addr, __count, entry,		\
			     __entry_size);				\
									\
end:									\
	perf_swevent_put_recursion_context(rctx);			\
end_recursion:								\
	local_irq_restore(irq_flags);					\
									\
}

/* Per-event perf probe just forwards to the template's routine. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)		\
static void ftrace_profile_##call(proto)			\
{								\
	struct ftrace_event_call *event_call = &event_##call;	\
								\
	ftrace_profile_templ_##template(event_call, args);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT