#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>

#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

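/*
 * syscall_trace_lock serializes registration of the sys_enter/sys_exit
 * tracepoint probes below. The refcounts track how many syscall events
 * currently need each probe; the bitmaps record which individual
 * syscalls are being traced.
 */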
static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

static int syscall_enter_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data);
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

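/*
 * __start/__stop_syscalls_metadata bound the linker section that collects
 * every compiled-in struct syscall_metadata. syscalls_metadata is the
 * syscall-number-indexed lookup table built from that section at boot.
 */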
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscall symbol aliases prefixed
	 * with "SyS" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif

#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow 32bit applications
 * to run on a 64bit kernel do not map the syscalls for
 * the 32bit tasks the same as they do for 64bit tasks.
 *
 *     *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as the
 * function arch_trace_is_compat_syscall(), which lets the
 * tracing system know that a compat syscall should be ignored.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */

static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

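/* Map a syscall number to its metadata, bounds-checking the number. */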
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

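/*
 * Output handler for the enter event: prints "name(arg: value, ...)".
 * With the verbose trace flag set, each value is preceded by its type.
 */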
static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

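/* Output handler for the exit event: prints "name -> 0x<return value>" */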
static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
				trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

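/*
 * SYSCALL_FIELD expands to the type/name/offset/size/sign argument list
 * that trace_define_field() expects. The sizeof comparison references the
 * deliberately undefined __bad_type_size() to force a link error when the
 * declared type does not match the struct field.
 */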
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

static int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

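/*
 * Build the print_fmt string exposed through the event's format file.
 * Enter events get a per-syscall format from __set_enter_print_fmt();
 * all exit events share the same static return-value format.
 */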
static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

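/*
 * Describe the enter record layout to the event core: the fixed syscall
 * nr field followed by one unsigned long per syscall argument.
 */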
static int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

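/*
 * Probe attached to the sys_enter tracepoint. If tracing is enabled for
 * this syscall, reserve a ring buffer event sized for its argument count,
 * fill in the syscall number and arguments, and commit the event unless
 * the event filter discards it.
 */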
static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

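/*
 * Probe attached to the sys_exit tracepoint: records the syscall number
 * and its return value.
 */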
static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

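/*
 * Enable tracing of one syscall's enter event. The shared tracepoint
 * probe is registered only when the first such event is enabled; the
 * bitmap bit makes the probe actually record this syscall.
 */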
static int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
	if (!ret) {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
	if (!ret) {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

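/*
 * raw_init callback for syscall events: builds the print format and
 * registers the event type with the trace event core.
 */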
static int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

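/*
 * Event classes shared by every syscall event. Enter events carry
 * per-syscall field lists (via ->get_fields); exit events share one
 * static field list.
 */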
struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

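/* Weak default; archs with a plain sys_call_table need not override it. */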
unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

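/*
 * At early boot, walk the syscall table, match each entry's symbol name
 * against the compiled-in metadata, and fill the nr-indexed lookup table.
 */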
static int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
early_initcall(init_ftrace_syscalls);

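/*
 * Perf support: mirrors the ftrace path above, but writes records into
 * the per-CPU perf buffers instead of the trace ring buffer.
 */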
#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

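/*
 * Perf flavour of the enter probe. The size is padded so that, together
 * with the u32 size header perf prepends to each record, the payload
 * stays u64-aligned.
 */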
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			       (unsigned long *)&rec->args);

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We could probably compute this size at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible today, but be paranoid about future growth.
	 * Ideally this check would happen outside the runtime path.
	 */
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		"exit event has grown above perf buffer size"))
		return;

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

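/*
 * ->reg() class callbacks: dispatch enable/disable requests from the
 * ftrace and perf sides to the helpers above.
 */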
static int syscall_enter_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}