#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

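/*
 * Map a syscall handler address to its entry in the syscalls metadata
 * section, matching by symbol name (ignoring the "sys"/"SyS" prefix).
 */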
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		/*
		 * Only compare after the "sys" prefix. Archs that use
		 * syscall wrappers may have syscalls symbols aliases prefixed
		 * with "SyS" instead of "sys", leading to an unwanted
		 * mismatch.
		 */
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}
	return NULL;
}

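/* Return the metadata for syscall number @nr, or NULL if out of range. */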
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

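/*
 * "trace" output callback for sys_enter events: print the syscall name
 * and its recorded arguments (with their types, in verbose mode).
 */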
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->id != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

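/*
 * "trace" output callback for sys_exit events: print the syscall name
 * and its return value.
 */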
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->id != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
				trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

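/*
 * __bad_type_size() is declared but never defined: if SYSCALL_FIELD()
 * is used with mismatched type and field sizes, the reference survives
 * to link time and the build fails there instead of misreporting sizes.
 */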
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

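/*
 * Write the "format" file body for a syscall-entry event: the nr field,
 * one unsigned long slot per argument, then the print fmt line.
 */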
int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int i;
	int ret;
	struct syscall_metadata *entry = call->data;
	struct syscall_trace_enter trace;
	int offset = offsetof(struct syscall_trace_enter, args);

	ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr));
	if (!ret)
		return 0;

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
				        entry->args[i]);
		if (!ret)
			return 0;
		ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
				       "\tsigned:%u;\n", offset,
				       sizeof(unsigned long),
				       is_signed_type(unsigned long));
		if (!ret)
			return 0;
		offset += sizeof(unsigned long);
	}

	trace_seq_puts(s, "\nprint fmt: \"");
	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
				       sizeof(unsigned long),
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return 0;
	}
	trace_seq_putc(s, '"');

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
				       entry->args[i]);
		if (!ret)
			return 0;
	}

	return trace_seq_putc(s, '\n');
}

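/*
 * Build the print_fmt string for a syscall-entry event into @buf.
 * Called first with len == 0 to size the buffer, then again to fill it.
 */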
static
int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

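/*
 * Write the "format" file body for a syscall-exit event: just the nr
 * and ret fields plus a fixed print fmt.
 */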
int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int ret;
	struct syscall_trace_exit trace;

	ret = trace_seq_printf(s,
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n"
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr),
			       SYSCALL_FIELD(long, ret));
	if (!ret)
		return 0;

	return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
}

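/*
 * Register the filterable fields of a syscall-entry event: nr plus one
 * unsigned long per argument, at increasing offsets.
 */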
int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

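/*
 * sys_enter tracepoint probe: if this syscall is enabled for tracing,
 * reserve a ring buffer event and record the number and arguments.
 */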
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->id, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

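/*
 * sys_exit tracepoint probe: record the syscall number and its return
 * value into the ring buffer.
 */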
void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->id, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

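/*
 * Enable tracing of one syscall's entry event. The tracepoint probe is
 * registered only for the first user; per-syscall state lives in the
 * enabled_enter_syscalls bitmap, all under syscall_trace_lock.
 */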
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter);
	if (!ret) {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

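/*
 * Enable tracing of one syscall's exit event; mirrors
 * reg_event_syscall_enter() with the exit bitmap and refcount.
 */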
int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit);
	if (!ret) {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

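/*
 * Per-event init: build the print format and register the event with
 * the trace output layer to obtain its id.
 */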
int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = register_ftrace_event(call->event);
	if (!id) {
		free_syscall_print_fmt(call);
		return -ENODEV;
	}
	call->id = id;
	INIT_LIST_HEAD(&call->fields);
	return 0;
}

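/*
 * Boot-time setup: resolve every syscall's handler address to its
 * metadata so that syscall_nr_to_meta() is a simple array lookup.
 */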
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					NR_syscalls, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
core_initcall(init_ftrace_syscalls);

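/*
 * The profiling (perf) probes below mirror the ftrace probes above but
 * write records into the per-cpu perf trace buffer via perf_tp_event().
 */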
#ifdef CONFIG_EVENT_PROFILE

static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;

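/*
 * perf probe for sys_enter: build the record on the per-cpu buffer and
 * submit it, guarding against swevent recursion.
 */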
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	unsigned long flags;
	char *trace_buf;
	char *raw_data;
	int syscall_nr;
	int rctx;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	/* mirror the ftrace probes: a negative nr must not index the bitmap */
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	cpu = smp_processor_id();

	trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_enter *) raw_data;
	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->enter_event->id;
	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			       (unsigned long *)&rec->args);
	perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(flags);
}

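/*
 * Refcounted enable/disable of the perf sys_enter probe, analogous to
 * reg_event_syscall_enter() above.
 */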
int prof_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_enter)
		ret = register_trace_sys_enter(prof_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall entry trace point");
	} else {
		set_bit(num, enabled_prof_enter_syscalls);
		sys_prof_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void prof_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_enter--;
	clear_bit(num, enabled_prof_enter_syscalls);
	if (!sys_prof_refcount_enter)
		unregister_trace_sys_enter(prof_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

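/*
 * perf probe for sys_exit: same scheme as prof_syscall_enter(), with a
 * fixed-size record carrying the return value.
 */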
static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	unsigned long flags;
	int syscall_nr;
	char *trace_buf;
	char *raw_data;
	int rctx;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	/* mirror the ftrace probes: a negative nr must not index the bitmap */
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible, but be paranoid with the future
	 * How to put this check outside runtime?
	 */
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		"exit event has grown above profile buffer size"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	cpu = smp_processor_id();

	trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_exit *)raw_data;

	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->exit_event->id;
	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(flags);
}

int prof_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_exit)
		ret = register_trace_sys_exit(prof_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
				"syscall exit trace point");
	} else {
		set_bit(num, enabled_prof_exit_syscalls);
		sys_prof_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void prof_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_exit--;
	clear_bit(num, enabled_prof_exit_syscalls);
	if (!sys_prof_refcount_exit)
		unregister_trace_sys_exit(prof_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

#endif