#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
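
/*
 * The tracepoint probes below are attached only while at least one event
 * is enabled; the refcounts track that, and the bitmaps select which
 * syscalls actually get recorded.
 */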
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

static int syscall_enter_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data);
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data);

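/*
 * ->get_fields() callback for entry events: the field list lives in the
 * syscall metadata rather than in the event class itself.
 */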
static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscall symbol aliases prefixed
	 * with "SyS" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

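/*
 * Map a syscall handler address back to its build-time metadata: look up
 * the symbol name via kallsyms and scan the __syscalls_metadata section
 * for a matching entry. Unimplemented syscalls (sys_ni_syscall) get NULL.
 */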
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

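/* Return the metadata for syscall number @nr, or NULL if it is unmapped. */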
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

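/*
 * Render an entry record as "name(arg: value, ...)"; argument types are
 * printed as well when the verbose trace flag is set.
 */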
static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

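/* Render an exit record as "name -> 0xret". */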
static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
				trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

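/*
 * __bad_type_size() is declared but deliberately never defined: if the
 * type handed to SYSCALL_FIELD() does not match the size of the struct
 * member, the surviving call to it breaks the build at link time.
 */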
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

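/*
 * Build the print_fmt string exported for an entry event: one hex
 * conversion per argument, followed by the matching REC->arg references.
 */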
static int
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

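/*
 * Exit events share one fixed format; entry events get a format string
 * built (and later freed) per syscall.
 */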
static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

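/*
 * ->define_fields() for entry events: the syscall nr followed by one
 * unsigned long per argument.
 */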
static int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

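/*
 * sys_enter tracepoint probe: if tracing is enabled for this syscall,
 * record its number and arguments in the ftrace ring buffer.
 */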
static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

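/*
 * sys_exit tracepoint probe: if tracing is enabled for this syscall,
 * record its number and return value in the ftrace ring buffer.
 */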
static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

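/*
 * Enable ftrace tracing of one syscall's entry event. The tracepoint
 * probe is registered only when the first such event is enabled; later
 * enables just set the syscall's bit and bump the refcount.
 */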
static int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
	if (!ret) {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
	if (!ret) {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

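/*
 * ->raw_init() shared by both event classes: check that the syscall has
 * metadata, build its print format and register the trace event.
 */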
static int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

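/* Output callbacks and event classes shared by every syscall event. */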
struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

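/*
 * Weak default: read the handler address straight from sys_call_table.
 * Architectures with a different table layout override this.
 */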
unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

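/*
 * Walk the syscall table once at boot and index each syscall's metadata
 * by number, so lookups at trace time are a simple array access.
 */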
static int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
early_initcall(init_ftrace_syscalls);

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

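/*
 * sys_enter probe for the perf path: build the entry record in a per-cpu
 * perf trace buffer and submit it to the events attached to this syscall.
 */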
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			       (unsigned long *)&rec->args);

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

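/*
 * perf counterpart of reg_event_syscall_enter(): register the probe on
 * first use and mark the syscall in the perf enter bitmap.
 */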
static int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

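/*
 * sys_exit probe for the perf path: record the syscall number and return
 * value and submit them to the attached perf events.
 */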
static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible today, but stay paranoid about future growth.
	 * Ideally this check would happen at build time.
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		"exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

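/*
 * ->reg() callback for entry events: dispatch ftrace and perf
 * register/unregister requests to the helpers above.
 */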
static int syscall_enter_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

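/* ->reg() callback for exit events; mirrors syscall_enter_register(). */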
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}