/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <mach/entry-macro.S>
#include <asm/unwind.h>

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
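/*
 * (Note: restore_user_regs with fast = 1 is assumed to leave r0 alone,
 * since r0 already holds the syscall return value at this point.)
 */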
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
	movne	why, #0				@ prevent further restarts
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
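
/*
 * NR_syscalls is computed by pulling in calls.S once with CALL()
 * defined to bump a counter; further down the same file is included
 * again (as sys_call_table) with CALL() redefined to emit each
 * routine's address.
 */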

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x

#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov     ip, sp
 *	push    {fp, ip, lr, pc}
 *	sub     fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 */
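
/*
 * As an illustration (not code emitted by this file), a function built
 * with -pg by a newer EABI compiler is expected to start roughly like:
 *
 *	func:
 *		push	{lr}
 *		bl	__gnu_mcount_nc
 *		...				@ body of func
 *
 * so on entry to __gnu_mcount_nc, lr holds an address inside func and
 * the word at [sp] is func's original return address.
 */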
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	stmdb	sp!, {r0-r3, lr}
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE

	.globl mcount_call
mcount_call:
	bl	ftrace_stub
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

ENTRY(ftrace_caller)
	stmdb	sp!, {r0-r3, lr}
	ldr	r1, [fp, #-4]
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE

	.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

#else

ENTRY(__gnu_mcount_nc)
	stmdb	sp!, {r0-r3, lr}		@ save regs; caller's push {lr} is now at [sp, #20]
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, ftrace_stub
	cmp	r0, r2				@ is a tracer installed?
	bne	gnu_trace
	ldmia	sp!, {r0-r3, ip, lr}		@ ip = return address, lr = caller's original lr
	mov	pc, ip				@ return, undoing the caller's push {lr}

gnu_trace:
	ldr	r1, [sp, #20]			@ lr of instrumented routine
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE
	mov	lr, pc
	mov	pc, r2
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip

ENTRY(mcount)
	stmdb	sp!, {r0-r3, lr}
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, ftrace_stub
	cmp	r0, r2
	bne	trace
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

trace:
	ldr	r1, [fp, #-4]			@ lr of instrumented routine
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE
	mov	lr, pc
	mov	pc, r2
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

#endif /* CONFIG_DYNAMIC_FTRACE */

	.globl ftrace_stub
ftrace_stub:
	mov	pc, lr

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */
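
/*
 * For reference, an EABI caller is assumed to issue a system call
 * roughly as:
 *
 *	mov	r7, #nr			@ syscall number in r7 (scno)
 *	swi	#0			@ arguments in r0-r5
 *
 * while an old-ABI caller encodes the number in the swi instruction
 * itself (swi #(__NR_SYSCALL_BASE + nr)), which is why the code below
 * may have to read the instruction back from [lr, #-4].
 */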

	/* If we're optimising for StrongARM the resulting code won't 
	   run on an ARM7 and we can save a couple of instructions.  
								--pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE
	subs	pc, lr, #4
#else
#define A710(code...)
#endif

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#elif defined(CONFIG_ARM_THUMB)

	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else

	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer
	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall	
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI, a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
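@ sys_syscall below handles the indirect syscall entry: the number of
@ the call to invoke arrives in r0 and its arguments in r1-r4 (plus the
@ stacked r5/r6), so everything is shuffled down one slot before the
@ dispatch through the table.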
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
		add	r3, sp, #S_OFF
		b	sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
		add	ip, sp, #S_OFF
		str	ip, [sp, #4]
		b	sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]
		b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)
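
/*
 * statfs64 and fstatfs64 take the structure size in r1.  Some user
 * space passes 88 here (presumably a padded/realigned definition of
 * struct statfs64) while the kernel expects 84, so the wrappers below
 * quietly fix the size up before calling the real routines.
 */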
sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
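/*
 * For example, with 16K pages (PAGE_SHIFT == 14) an offset supplied in
 * units of 4K must be a multiple of 4 and is converted below by
 * shifting it right by PAGE_SHIFT - 12 = 2 bits.
 */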
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	sys_mmap_pgoff
		mov	r0, #-EINVAL
		mov	pc, lr
#else
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif