/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>


#ifdef CONFIG_DYNAMIC_FTRACE
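/*
 * Build the branch instruction that would live at 'ip' and target 'addr'.
 * Nothing is patched here; callers compare or install the returned opcode
 * themselves via ftrace_modify_code() or patch_instruction().
 */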
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned int op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

	return op;
}

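/*
 * Compare-and-patch primitive. Every caller knows which instruction
 * should currently sit at 'ip', so the distinct error codes (-EFAULT
 * for an unreadable site, -EINVAL for an unexpected opcode, -EPERM for
 * a failed write) let ftrace_bug() report exactly what went wrong.
 */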
static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
	unsigned int replaced;

	/*
	 * Note: Due to modules and __init, code can disappear and
	 * change, so we need to protect against faulting as well as
	 * code changing. We do this by using the probe_kernel_*
	 * functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (replaced != old)
		return -EINVAL;

	/* replace the text with the new text */
	if (patch_instruction((unsigned int *)ip, new))
		return -EPERM;

	return 0;
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	addr = ppc_function_entry((void *)addr);

	/*
	 * create_branch() returns 0 when the target is out of range,
	 * so use it to verify that this offset can be branched.
	 */
	return create_branch((unsigned int *)ip, addr, 0);
}

#ifdef CONFIG_MODULES

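/*
 * A PowerPC I-form branch ("b"/"bl") has primary opcode 18 in its top
 * six bits, a 24 bit word-aligned displacement (LI) below that, and the
 * AA and LK bits at the bottom. Masking with 0xfc000003 keeps opcode,
 * AA and LK, so comparing against 0x48000001 matches a relative "bl"
 * with any displacement.
 *
 * find_bl_target() extracts LI with 0x03fffffc and sign-extends it by
 * hand. For example, "bl -4" encodes as 0x4bfffffd: the mask yields
 * 0x03fffffc, the sign bit (0x02000000) is set, and or-ing in
 * 0xfe000000 gives 0xfffffffc, i.e. -4.
 */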
static int is_bl_op(unsigned int op)
{
	return (op & 0xfc000003) == 0x48000001;
}

static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	offset = (op & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}

#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ptr;
	unsigned long ip = rec->ip;
	void *tramp;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
		return -EFAULT;

	/* Make sure that this is still a 24 bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = (void *)find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %p", ip, tramp);

	if (!is_module_trampoline(tramp)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	if (module_trampoline_target(mod, tramp, &ptr)) {
		printk(KERN_ERR "Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	/* This should match what was called */
	if (ptr != ppc_function_entry((void *)addr)) {
		printk(KERN_ERR "addr %lx does not match expected %lx\n",
			ptr, ppc_function_entry((void *)addr));
		return -EINVAL;
	}

	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */
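	/* opcode 18, displacement 8, AA = LK = 0: skips the TOC restore */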
	op = 0x48000008;	/* b +8 */

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}

#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24 bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 *  0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
	 *  0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
	 *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 *  0x4e, 0x80, 0x04, 0x20  bctr
	 */

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

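	/*
	 * Reconstruct the target from the lis/addi pair. addi sign-extends
	 * its immediate, which sym@ha already compensated for, so when the
	 * low half has its sign bit set the combined value is 0x10000 too
	 * high and is corrected back down.
	 */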
	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);
	if (tramp & 0x8000)
		tramp -= 0x10000;

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		printk(KERN_ERR
		       "Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = PPC_INST_NOP;
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			printk(KERN_ERR "No module loaded addr=%lx\n",
			       addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			printk(KERN_ERR
			       "Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	void *ip = (void *)rec->ip;

	/* read where this goes */
	if (probe_kernel_read(op, ip, sizeof(op)))
		return -EFAULT;

	/*
	 * We expect to see:
	 *
	 * b +8
	 * ld r2,XX(r1)
	 *
	 * The load offset is different depending on the ABI. For simplicity
	 * just mask it out when doing the compare.
	 */
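	/*
	 * 0xe8410000 is "ld r2,0(r1)": primary opcode 58, RT = 2, RA = 1.
	 * Only the 16 bit offset field varies between ABIs, hence the
	 * mask below.
	 */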
	if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
		printk(KERN_ERR "Unexpected call sequence: %x %x\n",
			op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits */
	if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
		printk(KERN_ERR "Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
#else
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_INST_NOP) {
		printk(KERN_ERR "Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = PPC_INST_NOP;
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Since we are converting from a nop, the record had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		printk(KERN_ERR "No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

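/*
 * ftrace_call is the patchable "bl" site inside the ftrace_caller
 * trampoline; retargeting it switches which tracer callback every
 * traced function funnels into.
 */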
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int old, new;
	int ret;

	old = *(unsigned int *)&ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

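/*
 * ftrace_update_record() compares a record's current state against the
 * requested enable state and reports which transition, if any, is
 * needed, so the arch code only has to supply the two patching
 * primitives above.
 */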
static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
	int ret;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;
	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);
	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		ret = __ftrace_replace_code(rec, enable);
		if (ret) {
			ftrace_bug(ret, rec->ip);
			return;
		}
	}
}

void arch_ftrace_update_code(int command)
{
	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

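/*
 * With dynamic ftrace, the ftrace_graph_call site inside ftrace_caller
 * holds a plain "b" that normally targets ftrace_graph_stub, which
 * simply returns. Enabling the graph tracer retargets that single
 * branch at ftrace_graph_caller; disabling points it back at the stub.
 */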
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, stub, 0);
	new = ftrace_call_replace(ip, addr, 0);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, addr, 0);
	new = ftrace_call_replace(ip, stub, 0);

	return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_PPC64
extern void mod_return_to_handler(void);
#endif

/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

#ifdef CONFIG_PPC64
	/* non core kernel code needs to save and restore the TOC */
	if (REGION_ID(self_addr) != KERNEL_REGION_ID)
		return_hooker = (unsigned long)&mod_return_to_handler;
#endif

	return_hooker = ppc_function_entry((void *)return_hooker);

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " PPC_LL "%[old], 0(%[parent])\n"
		"2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
		"   li %[faulted], 0\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: li %[faulted], 1\n"
		"   b 3b\n"
		".previous\n"

		".section __ex_table,\"a\"\n"
			PPC_LONG_ALIGN "\n"
			PPC_LONG "1b,4b\n"
			PPC_LONG "2b,4b\n"
		".previous"

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY)
		*parent = old;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
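/*
 * The PPC64 syscall table interleaves native and compat entries, so
 * the native handler for syscall 'nr' sits at index nr * 2.
 */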
unsigned long __init arch_syscall_addr(int nr)
{
	return sys_call_table[nr*2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */