/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <mach/entry-macro.S>
#include <asm/unwind.h>

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK		@ any pending work?
	bne	fast_work_pending		@ yes - take the slow path
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
	movne	why, #0				@ prevent further restarts
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

/*
 * Count the syscalls: expand each CALL() in calls.S to an increment
 * of NR_syscalls, then redefine CALL() to emit a table entry for the
 * sys_call_table declarations further down.
 */
	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x
102
#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov     ip, sp
 *	push    {fp, ip, lr, pc}
 *	sub     fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif
/*
 * Common body for the non-dynamic mcount entry points.  If no trace
 * function is registered (ftrace_trace_function still points at
 * ftrace_stub), optionally check the graph tracer hooks; otherwise
 * call the registered function with (func, func's caller) in r0/r1.
 */
.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2				@ trace function registered?
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr     r1, =ftrace_graph_return
	ldr     r2, [r1]
	cmp     r0, r2				@ graph return hook set?
	bne     ftrace_graph_caller\suffix

	ldr     r1, =ftrace_graph_entry
	ldr     r2, [r1]
	ldr     r0, =ftrace_graph_entry_stub
	cmp     r0, r2				@ graph entry hook set?
	bne     ftrace_graph_caller\suffix
#endif

	mcount_exit

1: 	mcount_get_lr	r1			@ lr of instrumented func
	mov	r0, lr				@ instrumented function
	sub	r0, r0, #MCOUNT_INSN_SIZE
	adr	lr, BSYM(2f)
	mov	pc, r2				@ call the trace function
2:	mcount_exit
.endm
/*
 * Common body for the dynamic-ftrace entry points.  ftrace_call\suffix
 * and ftrace_graph_call\suffix are well-known labels whose call sites
 * are patched at runtime by arch/arm/kernel/ftrace.c.
 */
.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mov	r0, lr				@ instrumented function
	sub	r0, r0, #MCOUNT_INSN_SIZE

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub			@ patched to the tracer's entry

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0				@ nop, patched to a branch
#endif

	mcount_exit
.endm
/*
 * Hand (&parent-lr, func, fp) to prepare_ftrace_return so the graph
 * tracer can redirect the instrumented routine's return address.
 */
.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
#else
	@ called from __mcount, untouched in lr
	mov	r1, lr			@ instrumented routine (func)
#endif
	sub	r1, r1, #MCOUNT_INSN_SIZE
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm
#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount: old-style (pre-GCC 4.4) entry point.  Relies on an
 * APCS frame pointer, so the caller's lr lives at [fp, #-4].
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ default (unpatched) call site: just restore lr and return
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

@ drop the old-ABI helper macros so they can be redefined below
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif
/*
 * __gnu_mcount_nc: new-style (GCC 4.4+) entry point.  The call site
 * has already pushed lr, so the caller's lr is at [sp, #20] after
 * mcount_enter, and mcount_exit pops that slot into ip.
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
.endm

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ default (unpatched) call site: pop lr pushed by the caller
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

304
305
306
307
308
309
310
311
312
313
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif
Abhishek Sagar's avatar
Abhishek Sagar committed
314

315
ENTRY(ftrace_stub)
316
.Lftrace_stub:
317
	mov	pc, lr
318
ENDPROC(ftrace_stub)
Abhishek Sagar's avatar
Abhishek Sagar committed
319

320
#endif /* CONFIG_FUNCTION_TRACER */
/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	/* If we're optimising for StrongARM the resulting code won't 
	   run on an ARM7 and we can save a couple of instructions.  
								--pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE
	subs	pc, lr, #4
#else
#define A710(code...)
#endif
	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always put syscall number into scno (r7).
	 */
  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#elif defined(CONFIG_ARM_THUMB)

	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else

	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno
	bl	__secure_computing
	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
1:
#endif

	tst	r10, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	@ literal holding the address of cr_alignment, read by vector_swi
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg
/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE
/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)
@ pass a pointer to the saved registers as the (hidden) pt_regs argument
sys_fork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_fork
ENDPROC(sys_fork_wrapper)

@ pass a pointer to the saved registers as the (hidden) pt_regs argument
sys_vfork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_vfork
ENDPROC(sys_vfork_wrapper)

@ pass a pointer to the saved registers as the fourth argument
sys_execve_wrapper:
		add	r3, sp, #S_OFF
		b	sys_execve
ENDPROC(sys_execve_wrapper)

@ store the pt_regs pointer into the fifth-argument stack slot
sys_clone_wrapper:
		add	ip, sp, #S_OFF
		str	ip, [sp, #4]
		b	sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

@ third argument is the user sp taken from the saved registers
sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]
		b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)
@ fix up the size argument (88 -> 84) before calling the generic syscall
sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

@ fix up the size argument (88 -> 84) before calling the generic syscall
sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	sys_mmap_pgoff
		mov	r0, #-EINVAL
		mov	pc, lr
#else
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)
#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif