entry-common.S 14.2 KB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
1
2
3
4
5
6
7
8
9
10
11
/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
12
#include <asm/ftrace.h>
13
#include <asm/unwind.h>
Linus Torvalds's avatar
Linus Torvalds committed
14

15
16
17
18
19
20
21
/*
 * Platforms that need a machine-specific hook on the return-to-user path
 * select CONFIG_NEED_RET_TO_USER and supply arch_ret_to_user in their
 * mach/entry-macro.S; everyone else gets this empty stub so callers can
 * invoke the macro unconditionally.
 */
#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro  arch_ret_to_user, tmp1, tmp2	@ \tmp1, \tmp2: scratch regs the real hook may clobber
	.endm
#endif

Linus Torvalds's avatar
Linus Torvalds committed
22
23
24
25
26
27
28
29
30
31
#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 *
 * Entered with r0 = syscall return value and the pt_regs frame still
 * on the SVC stack (offset by S_OFF — see entry-header.S).
 */
ret_fast_syscall:
32
33
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
34
	disable_irq				@ disable interrupts
Linus Torvalds's avatar
Linus Torvalds committed
35
36
37
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check work flags with IRQs off
	tst	r1, #_TIF_WORK_MASK		@ pending signal/resched/trace work?
	bne	fast_work_pending		@ yes: fall into the slow path
38
39
40
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on			@ IRQs come back on at exception return
#endif
41

42
43
44
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

45
	restore_user_regs fast = 1, offset = S_OFF	@ back to user space (r0 preserved on stack)
46
 UNWIND(.fnend		)
Linus Torvalds's avatar
Linus Torvalds committed
47
48
49
50
51
52
53
54
55

/*
 * Ok, we need to do extra processing, enter the slow path.
 * fast_work_pending first spills the syscall return value (r0) into the
 * saved pt_regs; work_pending is also reached directly from the slow
 * return path, where r0 is already saved.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
56
	bl	do_work_pending			@ C helper: signals, resched, notify-resume
57
	cmp	r0, #0				@ 0 => all work done, just return
58
	beq	no_work_pending
59
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)	@ <0 => restart via restart_syscall
60
61
62
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go

Linus Torvalds's avatar
Linus Torvalds committed
63
64
65
66
67
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * ret_to_user_from_irq is the same path minus the disable_irq (callers
 * arrive with IRQs already disabled).
 */
ENTRY(ret_to_user)
ret_slow_syscall:
68
	disable_irq				@ disable interrupts
69
ENTRY(ret_to_user_from_irq)
Linus Torvalds's avatar
Linus Torvalds committed
70
71
72
73
	ldr	r1, [tsk, #TI_FLAGS]		@ any pending work before leaving the kernel?
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending			@ handle signals/resched first
no_work_pending:
74
75
76
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on			@ IRQs re-enabled by the return itself
#endif
77
78
79
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

80
	restore_user_regs fast = 0, offset = 0	@ full register restore, back to user
81
ENDPROC(ret_to_user_from_irq)
82
ENDPROC(ret_to_user)
Linus Torvalds's avatar
Linus Torvalds committed
83
84
85
86
87
88
89
90
91

/*
 * This is how we return from a fork.
 * First run of a forked child: finish the scheduler's bookkeeping,
 * then leave via the slow syscall return path (why=1 => real syscall).
 */
ENTRY(ret_from_fork)
	bl	schedule_tail			@ finish context switch (drops rq lock)
	get_thread_info tsk			@ tsk = current thread_info
	mov	why, #1				@ this *was* a syscall (fork)
	b	ret_slow_syscall
92
ENDPROC(ret_from_fork)
Linus Torvalds's avatar
Linus Torvalds committed
93

94
95
96
97
98
99
100
101
102
103
104
105
/*
 * First run of a kernel thread: call the thread function r5 with
 * argument r4 (both set up by copy_thread).  If it ever returns,
 * fall into do_exit — kernel threads must not return to user space.
 */
ENTRY(ret_from_kernel_thread)
 UNWIND(.fnstart)
 UNWIND(.cantunwind)
	bl	schedule_tail			@ finish context switch
	mov	r0, r4				@ r0 = thread argument
	adr	lr, BSYM(1f)	@ kernel threads should not exit
	mov	pc, r5				@ jump to thread function
1:	bl	do_exit				@ reached only if the function returns
	nop
 UNWIND(.fnend)
ENDPROC(ret_from_kernel_thread)

106
107
108
109
110
111
112
113
114
115
116
117
/*
 * turn a kernel thread into userland process
 * use: ret_from_kernel_execve(struct pt_regs *normal)
 *
 * r0 points at the pt_regs prepared by execve; we force ->ARM_r0 = 0
 * (execve "returns" 0 in the new image) and switch sp onto that frame
 * so the normal slow return path restores it.
 */
ENTRY(ret_from_kernel_execve)
	mov	why, #0			@ not a syscall
	str	why, [r0, #S_R0]	@ ... and we want 0 in ->ARM_r0 as well
	get_thread_info tsk		@ thread structure
	mov	sp, r0			@ stack pointer just under pt_regs
	b	ret_slow_syscall
ENDPROC(ret_from_kernel_execve)

118
119
/*
 * Compute NR_syscalls at assembly time: calls.S expands CALL() once per
 * table entry, so redefining CALL() as an increment counts the entries.
 * CALL() is then redefined to emit the actual pointers when calls.S is
 * included again for the real tables below.
 */
	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
Linus Torvalds's avatar
Linus Torvalds committed
120
#include "calls.S"
121
122
#undef CALL
#define CALL(x) .long x
Linus Torvalds's avatar
Linus Torvalds committed
123

124
#ifdef CONFIG_FUNCTION_TRACER
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov     ip, sp
 *	push    {fp, ip, lr, pc}
 *	sub     fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
160
161
162
163
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
164
 */
165
166
167
168
169
170
171

/*
 * Without CONFIG_OLD_MCOUNT we only provide __gnu_mcount_nc, which is
 * what GCC >= 4.4 emits; reject older compilers at build time rather
 * than producing a kernel whose mcount call sites go unresolved.
 */
#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

172
173
174
175
176
/*
 * \rd = address of the instrumented function derived from a return
 * address \rn: strip the Thumb bit and step back over the mcount call
 * site so ftrace records the function's entry, not the call's return.
 */
.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm

177
178
179
180
181
182
183
/*
 * Common body for the non-dynamic mcount entry points.  Relies on the
 * mcount_enter/mcount_get_lr/mcount_exit macros defined by the caller
 * (old-ABI vs __gnu_mcount_nc variants).  If ftrace_trace_function still
 * points at the stub, only the graph-tracer hooks are checked; otherwise
 * the registered tracer is called with (func, parent) in r0/r1.
 */
.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]			@ r2 = current tracer callback
	adr	r0, .Lftrace_stub
	cmp	r0, r2				@ tracer registered?
	bne	1f				@ yes: go call it
184

185
186
187
188
189
190
191
192
193
194
195
196
197
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr     r1, =ftrace_graph_return
	ldr     r2, [r1]
	cmp     r0, r2				@ graph return hook registered?
	bne     ftrace_graph_caller\suffix

	ldr     r1, =ftrace_graph_entry
	ldr     r2, [r1]
	ldr     r0, =ftrace_graph_entry_stub
	cmp     r0, r2				@ graph entry hook registered?
	bne     ftrace_graph_caller\suffix
#endif

198
	mcount_exit				@ nothing to do: restore and return
199

200
1: 	mcount_get_lr	r1			@ lr of instrumented func
201
	mcount_adjust_addr	r0, lr		@ instrumented function
202
203
204
205
	adr	lr, BSYM(2f)			@ return here after the tracer
	mov	pc, r2				@ call tracer(func, parent)
2:	mcount_exit
.endm
Abhishek Sagar's avatar
Abhishek Sagar committed
206

207
208
/*
 * Common body for the CONFIG_DYNAMIC_FTRACE entry points.  The
 * ftrace_call\suffix / ftrace_graph_call\suffix sites are placeholder
 * instructions that arch/arm/kernel/ftrace.c patches at runtime to
 * enable/disable tracing.
 */
.macro __ftrace_caller suffix
	mcount_enter
Abhishek Sagar's avatar
Abhishek Sagar committed
209

210
	mcount_get_lr	r1			@ lr of instrumented func
211
	mcount_adjust_addr	r0, lr		@ instrumented function
212
213
214

	.globl ftrace_call\suffix
ftrace_call\suffix:
215
	bl	ftrace_stub			@ patched to the tracer when enabled
216

217
218
219
220
221
222
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0				@ nop; patched to branch to graph caller
#endif

223
224
	mcount_exit
.endm
Abhishek Sagar's avatar
Abhishek Sagar committed
225

226
227
/*
 * Set up and call prepare_ftrace_return(&parent, func, fp) so the graph
 * tracer can hijack the instrumented function's return address.  The
 * source of "func" differs between the dynamic and static entry paths.
 */
.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
228
229
230
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
231
	mcount_adjust_addr	r1, r1
232
233
#else
	@ called from __mcount, untouched in lr
234
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
235
#endif
236
237
238
239
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm
Abhishek Sagar's avatar
Abhishek Sagar committed
240

241
#ifdef CONFIG_OLD_MCOUNT
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
/*
 * mcount
 *
 * Old-ABI (pre-GCC-4.4) mcount support: call sites use an APCS frame,
 * so the caller's lr lives at [fp, #-4].  The three macros below give
 * __mcount/__ftrace_caller the old-ABI entry/exit conventions; they are
 * purged again at the end of this #ifdef block.
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}	@ save arg regs + our return address
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]		@ caller's lr from the APCS frame
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]		@ return to the instrumented caller
	ldmia	sp!, {r0-r3, pc}
.endm

259
ENTRY(mcount)
260
#ifdef CONFIG_DYNAMIC_FTRACE
261
262
263
	@ dynamic ftrace: mcount itself is a fast no-op; call sites are patched
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
264
265
266
#else
	__mcount _old
#endif
267
ENDPROC(mcount)
Abhishek Sagar's avatar
Abhishek Sagar committed
268

269
#ifdef CONFIG_DYNAMIC_FTRACE
270
ENTRY(ftrace_caller_old)
271
	__ftrace_caller _old
272
273
ENDPROC(ftrace_caller_old)
#endif
Abhishek Sagar's avatar
Abhishek Sagar committed
274

275
276
277
278
279
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif
Abhishek Sagar's avatar
Abhishek Sagar committed
280

281
282
283
284
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif
Abhishek Sagar's avatar
Abhishek Sagar committed
285

286
287
288
289
290
/*
 * __gnu_mcount_nc
 *
 * GCC >= 4.4 entry point: the call site has already pushed {lr}, so on
 * entry sp points at the caller's saved lr and no frame pointer is
 * needed.  mcount_exit pops that extra slot into ip and returns through
 * it (ip is call-clobbered per the AAPCS, so this is safe).
 */

.macro mcount_enter
291
	stmdb	sp!, {r0-r3, lr}	@ arg regs + our lr, above caller's pushed lr
292
293
294
295
296
297
298
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]		@ caller's lr, pushed by the call site
.endm

.macro mcount_exit
299
300
	ldmia	sp!, {r0-r3, ip, lr}	@ ip <- caller's lr; lr restored
	mov	pc, ip			@ return, leaving the stack balanced
301
.endm
302

303
304
305
306
ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ dynamic ftrace: just pop the call site's {lr} push and return
	mov	ip, lr
	ldmia	sp!, {lr}
307
	mov	pc, ip
308
309
310
#else
	__mcount
#endif
311
ENDPROC(__gnu_mcount_nc)
312

313
314
315
316
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
317
#endif
Abhishek Sagar's avatar
Abhishek Sagar committed
318

319
320
321
322
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
323
#endif
Abhishek Sagar's avatar
Abhishek Sagar committed
324

325
326
327
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
Abhishek Sagar's avatar
Abhishek Sagar committed
328

329
330
331
332
333
334
335
336
337
338
/*
 * return_to_handler: installed by the graph tracer in place of the
 * instrumented function's return address; recovers the real one from
 * ftrace_return_to_handler() and jumps there, preserving r0-r3 (the
 * function's return value may be in r0/r1).
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif
Abhishek Sagar's avatar
Abhishek Sagar committed
339

340
ENTRY(ftrace_stub)
341
.Lftrace_stub:
342
	mov	pc, lr			@ default no-op tracer target
343
ENDPROC(ftrace_stub)
Abhishek Sagar's avatar
Abhishek Sagar committed
344

345
#endif /* CONFIG_FUNCTION_TRACER */
Abhishek Sagar's avatar
Abhishek Sagar committed
346

Linus Torvalds's avatar
Linus Torvalds committed
347
348
349
350
351
352
353
/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 * Entered from the SWI/SVC vector with lr = user PC + 4 and spsr = user
 * CPSR.  Builds a pt_regs frame on the SVC stack, works out the syscall
 * number per ABI config, then dispatches through the syscall table.
 * Register aliases (tsk/scno/tbl/why) come from entry-header.S.
 */

	.align	5
ENTRY(vector_swi)
354
355
	sub	sp, sp, #S_FRAME_SIZE		@ make room for pt_regs
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
356
357
358
359
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
360
361
362
363
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
Linus Torvalds's avatar
Linus Torvalds committed
364
	zero_fp
365
366
367
368

	/*
	 * Get the system call number.
	 */
369

370
#if defined(CONFIG_OABI_COMPAT)
371

372
373
374
375
376
377
378
379
380
381
382
	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
#endif
383
384
385
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif
386
387
388
389
390
391

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always put syscall number into scno (r7).
	 */
392
#elif defined(CONFIG_ARM_THUMB)
393
	/* Legacy ABI only, possibly thumb mode. */
394
395
396
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]
397

398
#else
399
	/* Legacy ABI only. */
400
401
	ldr	scno, [lr, #-4]			@ get SWI instruction
#endif
Linus Torvalds's avatar
Linus Torvalds committed
402
403
404
405
406
407

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
408
	enable_irq				@ safe now that the frame is saved
Linus Torvalds's avatar
Linus Torvalds committed
409
410

	get_thread_info tsk			@ tsk = current thread_info
411
412
413
414
415
416
417
418
419
420
421
422
423
	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
Linus Torvalds's avatar
Linus Torvalds committed
424
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
425
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
426
#endif
427

428
local_restart:
Nicolas Pitre's avatar
Nicolas Pitre committed
429
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
430
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
Nicolas Pitre's avatar
Nicolas Pitre committed
431
432
433
434
435
436
437
438
439
440
441

#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno
	bl	__secure_computing	
	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
1:
#endif

442
	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
Linus Torvalds's avatar
Linus Torvalds committed
443
444
445
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
446
	adr	lr, BSYM(ret_fast_syscall)	@ return address
Linus Torvalds's avatar
Linus Torvalds committed
447
448
449
450
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	@ number out of range: private ARM syscall or ENOSYS
	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
451
452
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
Linus Torvalds's avatar
Linus Torvalds committed
453
454
	bcs	arm_syscall	
	b	sys_ni_syscall			@ not private func
455
ENDPROC(vector_swi)
Linus Torvalds's avatar
Linus Torvalds committed
456
457
458
459
460
461

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 * The tracer may rewrite the syscall number, so everything is
	 * reloaded from pt_regs after syscall_trace_enter().
	 */
__sys_trace:
462
463
464
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter
Linus Torvalds's avatar
Linus Torvalds committed
465

466
	adr	lr, BSYM(__sys_trace_return)	@ return address
467
	mov	scno, r0			@ syscall number (possibly new)
Linus Torvalds's avatar
Linus Torvalds committed
468
469
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
470
471
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
Linus Torvalds's avatar
Linus Torvalds committed
472
473
474
475
476
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b				@ out of range: ENOSYS path in vector_swi

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
477
478
479
	mov	r1, scno
	mov	r0, sp
	bl	syscall_trace_exit		@ report exit to the tracer
Linus Torvalds's avatar
Linus Torvalds committed
480
481
482
483
484
485
486
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	/* Pointer to the saved CP15 control register value; loaded by
	 * vector_swi to restore alignment-trap state on kernel entry. */
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
487
488
489
490
491
492
493
494
495
496
497
498
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
 * calls.S expands one CALL()/ABI()/OBSOLETE() entry per syscall; CALL(x)
 * was redefined above to emit ".long x".
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
Linus Torvalds's avatar
Linus Torvalds committed
499
500
501
502
503
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
504
505
#undef ABI
#undef OBSOLETE
Linus Torvalds's avatar
Linus Torvalds committed
506
507
508
509
510

/*============================================================================
 * Special system call wrappers
 *
 * sys_syscall(nr, a1, a2, a3, a4, a5, a6): indirect syscall — shift the
 * arguments down one register and re-dispatch through the table.  Direct
 * recursion into sys_syscall itself is rejected (first cmp).
 */
@ r0 = syscall number
511
@ r8 = syscall table
Linus Torvalds's avatar
Linus Torvalds committed
512
sys_syscall:
513
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
Linus Torvalds's avatar
Linus Torvalds committed
514
515
516
517
518
519
520
521
522
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
523
ENDPROC(sys_syscall)
Linus Torvalds's avatar
Linus Torvalds committed
524
525
526
527

@ fork/vfork/clone need a struct pt_regs * — pass the saved frame.
sys_fork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_fork
528
ENDPROC(sys_fork_wrapper)
Linus Torvalds's avatar
Linus Torvalds committed
529
530
531
532

sys_vfork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_vfork
533
ENDPROC(sys_vfork_wrapper)
Linus Torvalds's avatar
Linus Torvalds committed
534
535
536
537
538

@ clone takes regs as its 5th argument, i.e. on the stack.
sys_clone_wrapper:
		add	ip, sp, #S_OFF
		str	ip, [sp, #4]
		b	sys_clone
539
ENDPROC(sys_clone_wrapper)
Linus Torvalds's avatar
Linus Torvalds committed
540
541
542

@ sigreturn variants: pass pt_regs and clear "why" so the restored r0
@ is never mistaken for a -ERESTART* value.
sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
Al Viro's avatar
Al Viro committed
543
		mov	why, #0		@ prevent syscall restart handling
Linus Torvalds's avatar
Linus Torvalds committed
544
		b	sys_sigreturn
545
ENDPROC(sys_sigreturn_wrapper)
Linus Torvalds's avatar
Linus Torvalds committed
546
547
548

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
Al Viro's avatar
Al Viro committed
549
		mov	why, #0		@ prevent syscall restart handling
Linus Torvalds's avatar
Linus Torvalds committed
550
		b	sys_rt_sigreturn
551
ENDPROC(sys_rt_sigreturn_wrapper)
Linus Torvalds's avatar
Linus Torvalds committed
552
553
554
555

@ sigaltstack needs the user sp as its 3rd argument.
sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]
		b	do_sigaltstack
556
ENDPROC(sys_sigaltstack_wrapper)
Linus Torvalds's avatar
Linus Torvalds committed
557

558
559
560
561
@ ARM user space passes sizeof(struct statfs64) including padding (88);
@ the generic syscall expects 84 — fix it up before dispatch.
sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
562
ENDPROC(sys_statfs64_wrapper)
563
564
565
566
567

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
568
ENDPROC(sys_fstatfs64_wrapper)
569

Linus Torvalds's avatar
Linus Torvalds committed
570
571
572
573
574
575
576
577
578
/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 *
 * With PAGE_SHIFT > 12 the 4K-unit offset must be an exact multiple of
 * the page size; otherwise it is rescaled to page units.  The 6th arg
 * (pgoff) is passed on the stack per the AAPCS.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
Al Viro's avatar
Al Viro committed
579
		beq	sys_mmap_pgoff
Linus Torvalds's avatar
Linus Torvalds committed
580
		mov	r0, #-EINVAL
Russell King's avatar
Russell King committed
581
		mov	pc, lr
Linus Torvalds's avatar
Linus Torvalds committed
582
583
#else
		str	r5, [sp, #4]
Al Viro's avatar
Al Viro committed
584
		b	sys_mmap_pgoff
Linus Torvalds's avatar
Linus Torvalds committed
585
#endif
586
ENDPROC(sys_mmap2)
587
588

#ifdef CONFIG_OABI_COMPAT
589

590
591
592
593
594
595
596
/*
 * These are syscalls with argument register differences
 *
 * OABI packs 64-bit arguments differently from EABI (no even/odd
 * register-pair alignment), so each wrapper moves the arguments into
 * the positions the EABI implementation expects before branching.
 */

sys_oabi_pread64:
		stmia	sp, {r3, r4}		@ 64-bit pos goes on the stack
		b	sys_pread64
597
ENDPROC(sys_oabi_pread64)
598
599
600
601

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}		@ 64-bit pos goes on the stack
		b	sys_pwrite64
602
ENDPROC(sys_oabi_pwrite64)
603
604
605
606
607

sys_oabi_truncate64:
		mov	r3, r2			@ 64-bit length into r2/r3 pair
		mov	r2, r1
		b	sys_truncate64
608
ENDPROC(sys_oabi_truncate64)
609
610
611
612
613

sys_oabi_ftruncate64:
		mov	r3, r2			@ 64-bit length into r2/r3 pair
		mov	r2, r1
		b	sys_ftruncate64
614
ENDPROC(sys_oabi_ftruncate64)
615
616
617
618
619
620

sys_oabi_readahead:
		str	r3, [sp]		@ count moves to the stack
		mov	r3, r2			@ 64-bit offset into r2/r3 pair
		mov	r2, r1
		b	sys_readahead
621
ENDPROC(sys_oabi_readahead)
622

623
624
625
626
627
628
629
630
631
632
633
634
635
/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 * ABI() now selects the "compat" column of calls.S and obsolete
 * syscalls stay callable.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

636
637
#endif