#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>

#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

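/*
 * Find the syscall metadata entry for a syscall handler address by
 * resolving the address to its symbol name and scanning the metadata
 * section for a matching entry.
 */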
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		/*
		 * Only compare after the "sys" prefix. Archs that use
		 * syscall wrappers may have syscalls symbols aliases prefixed
		 * with "SyS" instead of "sys", leading to an unwanted
		 * mismatch.
		 */
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}
	return NULL;
}

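/* Return the metadata for syscall number @nr, or NULL if it is unknown. */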
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

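/* Linear search of the metadata table; returns the syscall number or -1. */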
int syscall_name_to_nr(const char *name)
{
	int i;

	if (!syscalls_metadata)
		return -1;

	for (i = 0; i < NR_syscalls; i++) {
		if (syscalls_metadata[i]) {
			if (!strcmp(syscalls_metadata[i]->name, name))
				return i;
		}
	}
	return -1;
}

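/* Record the trace event ids assigned to the syscall entry/exit events. */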
void set_syscall_enter_id(int num, int id)
{
	syscalls_metadata[num]->enter_id = id;
}

void set_syscall_exit_id(int num, int id)
{
	syscalls_metadata[num]->exit_id = id;
}

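/*
 * Output one syscall entry event as "name(arg: value, ...)", prefixing
 * each value with its type when the verbose trace flag is set.
 */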
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_id != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

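/* Output one syscall exit event as "name -> 0x<return value>". */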
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_id != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
				trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

extern char *__bad_type_size(void);

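/*
 * Describe one field of the trace record (type, name, offset, size,
 * signedness). A size mismatch between the declared type and the struct
 * member leaves a call to the undefined __bad_type_size(), breaking the
 * build at link time.
 */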
#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

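/*
 * Write the "format" file contents for a syscall entry event: one field
 * description per argument plus the print fmt line used by parsers.
 */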
int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int i;
	int ret;
	struct syscall_metadata *entry = call->data;
	struct syscall_trace_enter trace;
	int offset = offsetof(struct syscall_trace_enter, args);

	ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr));
	if (!ret)
		return 0;

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
				       entry->args[i]);
		if (!ret)
			return 0;
		ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
				       "\tsigned:%u;\n", offset,
				       sizeof(unsigned long),
				       is_signed_type(unsigned long));
		if (!ret)
			return 0;
		offset += sizeof(unsigned long);
	}

	trace_seq_puts(s, "\nprint fmt: \"");
	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
				       sizeof(unsigned long),
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return 0;
	}
	trace_seq_putc(s, '"');

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
				       entry->args[i]);
		if (!ret)
			return 0;
	}

	return trace_seq_putc(s, '\n');
}

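/* Write the "format" file contents for a syscall exit event. */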
int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int ret;
	struct syscall_trace_exit trace;

	ret = trace_seq_printf(s,
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n"
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr),
			       SYSCALL_FIELD(long, ret));
	if (!ret)
		return 0;

	return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
}

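/* Define the filterable fields (nr plus each argument) of an entry event. */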
int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

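/* Define the filterable fields (nr and ret) of an exit event. */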
int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

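/*
 * Tracepoint probe for syscall entry: record the syscall number and
 * arguments into the ftrace ring buffer when tracing is enabled for
 * this syscall.
 */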
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id,
						  size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

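/*
 * Tracepoint probe for syscall exit: record the syscall number and
 * return value into the ftrace ring buffer when tracing is enabled for
 * this syscall.
 */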
void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id,
				sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

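/*
 * Enable ftrace tracing of one syscall's entry event. The tracepoint
 * probe itself is only registered for the first syscall enabled.
 */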
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;
	const char *name;

	name = ((struct syscall_metadata *)call->data)->name;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall entry trace point\n");
	} else {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

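/* Disable ftrace tracing of one syscall's entry event. */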
void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;
	const char *name;

	name = ((struct syscall_metadata *)call->data)->name;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

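/*
 * Enable ftrace tracing of one syscall's exit event. The tracepoint
 * probe itself is only registered for the first syscall enabled.
 */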
int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;
	const char *name;

	name = ((struct syscall_metadata *)call->data)->name;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall exit trace point\n");
	} else {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

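/* Disable ftrace tracing of one syscall's exit event. */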
void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;
	const char *name;

	name = ((struct syscall_metadata *)call->data)->name;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

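/*
 * Build the syscall number -> metadata table at boot, resolving each
 * entry of the syscall table to its metadata record.
 */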
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					NR_syscalls, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		syscalls_metadata[i] = meta;
	}

	return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_EVENT_PROFILE

static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;

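/*
 * Tracepoint probe for perf: build the syscall entry record in the
 * per-cpu profile buffer and hand it to perf_tp_event().
 */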
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	unsigned long flags;
	char *trace_buf;
	char *raw_data;
	int syscall_nr;
	int rctx;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	cpu = smp_processor_id();

	trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_enter *) raw_data;
	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->enter_id;
	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			       (unsigned long *)&rec->args);
	perf_tp_event(sys_data->enter_id, 0, 1, rec, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(flags);
}

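/* Enable perf profiling of one syscall's entry event. */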
int reg_prof_syscall_enter(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_enter)
		ret = register_trace_sys_enter(prof_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall entry trace point\n");
	} else {
		set_bit(num, enabled_prof_enter_syscalls);
		sys_prof_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

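/* Disable perf profiling of one syscall's entry event. */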
void unreg_prof_syscall_enter(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_enter--;
	clear_bit(num, enabled_prof_enter_syscalls);
	if (!sys_prof_refcount_enter)
		unregister_trace_sys_enter(prof_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

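/*
 * Tracepoint probe for perf: build the syscall exit record in the
 * per-cpu profile buffer and hand it to perf_tp_event().
 */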
static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	unsigned long flags;
	int syscall_nr;
	char *trace_buf;
	char *raw_data;
	int rctx;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid with the future
	 * How to put this check outside runtime?
	 */
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		"exit event has grown above profile buffer size"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	cpu = smp_processor_id();

	trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_exit *)raw_data;

	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->exit_id;
	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	perf_tp_event(sys_data->exit_id, 0, 1, rec, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(flags);
}

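/* Enable perf profiling of one syscall's exit event. */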
int reg_prof_syscall_exit(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_exit)
		ret = register_trace_sys_exit(prof_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall exit trace point\n");
	} else {
		set_bit(num, enabled_prof_exit_syscalls);
		sys_prof_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

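/* Disable perf profiling of one syscall's exit event. */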
void unreg_prof_syscall_exit(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_exit--;
	clear_bit(num, enabled_prof_exit_syscalls);
	if (!sys_prof_refcount_exit)
		unregister_trace_sys_exit(prof_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

#endif