| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | /* | 
					
						
							|  |  |  |  *  linux/arch/arm/kernel/entry-armv.S | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  *  Copyright (C) 1996,1997,1998 Russell King. | 
					
						
							|  |  |  |  *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 | 
					
						
							| 
									
										
										
										
											2006-01-13 21:05:25 +00:00
										 |  |  |  *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  |  * | 
					
						
							|  |  |  |  * This program is free software; you can redistribute it and/or modify
 | 
					
						
							|  |  |  |  * it under the terms of the GNU General Public License version 2 as | 
					
						
							|  |  |  |  * published by the Free Software Foundation. | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  *  Low-level vector interface routines | 
					
						
							|  |  |  |  * | 
					
						
							| 
									
										
										
										
											2007-12-04 14:33:33 +01:00
										 |  |  |  *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction | 
					
						
							|  |  |  |  *  that causes it to save wrong values...  Be aware! | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  |  */ | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-03-10 10:30:31 -06:00
										 |  |  | #include <asm/assembler.h> | 
					
						
							| 
									
										
										
										
											2005-10-29 21:44:55 +01:00
										 |  |  | #include <asm/memory.h> | 
					
						
							| 
									
										
										
										
											2011-02-06 15:32:24 +00:00
										 |  |  | #include <asm/glue-df.h> | 
					
						
							|  |  |  | #include <asm/glue-pf.h> | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | #include <asm/vfpmacros.h> | 
					
						
							| 
									
										
										
										
											2012-02-08 18:26:34 -06:00
										 |  |  | #ifndef CONFIG_MULTI_IRQ_HANDLER | 
					
						
							| 
									
										
										
										
											2008-08-05 16:14:15 +01:00
										 |  |  | #include <mach/entry-macro.S> | 
					
						
							| 
									
										
										
										
											2012-02-08 18:26:34 -06:00
										 |  |  | #endif | 
					
						
							| 
									
										
										
										
											2006-06-21 13:31:52 +01:00
										 |  |  | #include <asm/thread_notify.h> | 
					
						
							| 
									
										
										
										
											2009-02-16 11:42:09 +01:00
										 |  |  | #include <asm/unwind.h> | 
					
						
							| 
									
										
										
										
											2009-11-09 23:53:29 +00:00
										 |  |  | #include <asm/unistd.h> | 
					
						
							| 
									
										
										
										
											2010-07-05 14:53:10 +01:00
										 |  |  | #include <asm/tls.h> | 
					
						
							| 
									
										
										
										
											2012-03-28 18:30:01 +01:00
										 |  |  | #include <asm/system_info.h> | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | #include "entry-header.S" | 
					
						
							| 
									
										
										
										
											2010-12-22 13:20:08 +01:00
										 |  |  | #include <asm/entry-macro-multi.S> | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-05-21 18:14:44 +01:00
/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	@ Multi-platform kernels dispatch through the handle_arch_irq
	@ function pointer rather than a compile-time entry macro.
	ldr	r1, =handle_arch_irq
	mov	r0, sp			@ arg0: struct pt_regs * (regs saved on SVC stack)
	adr	lr, BSYM(9997f)		@ fake return address: resume at 9997 below
	ldr	pc, [r1]		@ tail-jump to *handle_arch_irq
#else
	@ Single-platform build: use the handler from <mach/entry-macro.S>.
	arch_irq_handler_default
#endif
9997:
	.endm
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-06-26 10:22:08 +01:00
.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	@ Several CPU types in one kernel: call through the per-CPU
	@ processor vector table (entry PROCESSOR_PABT_FUNC).
	ldr	ip, .LCprocfns
	mov	lr, pc			@ manual link; next insn loads pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	@ Single CPU type: direct call to the compile-time handler.
	bl	CPU_PABORT_HANDLER
#endif
	.endm
					
						
							|  |  |  | 
 | 
					
						
.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	@ Multiple CPU types: indirect through the processor vector
	@ table (entry PROCESSOR_DABT_FUNC).
	ldr	ip, .LCprocfns
	mov	lr, pc			@ manual link; next insn loads pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-12-03 15:27:56 -05:00
#ifdef CONFIG_KPROBES
	@ Keep this code in .kprobes.text so kprobes itself cannot be
	@ probed (the kprobe core blacklists this section).
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
/*
 * Invalid mode handlers
 */
	@ Build a partial pt_regs frame for an exception taken from a
	@ mode we cannot handle; \reason is the BAD_* code passed on
	@ to bad_mode() in r1.
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	@ r0 points at the small per-mode exception stack holding the
	@ saved r0, lr_<exception> and spsr_<exception> (set up by the
	@ vector stubs) -- presumably; confirm against vector_stub.
	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp			@ arg0: struct pt_regs *
	b	bad_mode
ENDPROC(__und_invalid)
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
/*
 * SVC mode handlers
 */

/* EABI requires an 8-byte-aligned stack; SPFIX() code conditionally
 * realigns sp on entry (and undoes it when computing the saved sp). */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	@ Common entry for exceptions taken in SVC mode: builds a full
	@ pt_regs frame on the SVC stack.  \stack_hole reserves extra
	@ space below the frame (used by the kprobes undef handler).
	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)	@ align sp to 8 bytes if needed
	stmia	sp, {r1 - r12}

	@ r0 still points at the per-mode exception stack:
	@ saved r0, lr_<exception>, spsr_<exception>.
	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)	@ r2 = original (pre-exception) sp_svc
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off	@ tell lockdep IRQs are now masked
#endif
	.endm
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-06-25 17:35:19 +01:00
.align	5
@ Data abort taken while in SVC mode.
__dabt_svc:
	svc_entry
	mov	r2, sp			@ r2 = pt_regs for dabt_helper
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
.align	5
@ IRQ taken while in SVC mode.
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	@ Kernel preemption: reschedule on return only if the preempt
	@ count is zero and TIF_NEED_RESCHED is set.
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
.ltorg					@ flush literal pool here

#ifdef CONFIG_PREEMPT
@ Loop calling preempt_schedule_irq() until TIF_NEED_RESCHED clears.
@ Caller's return address is kept in r8 (not on the stack).
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
@ Fix up the saved PC, then report an undefined instruction.
@ In: r0 = pt_regs, r1 = PC correction (4 for ARM, 2 for Thumb).
__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM
	@ the PC will be pointing at the next instruction, and have to
	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction.  We
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr			@ tail-call; do_undefinstr returns for us
ENDPROC(__und_fault)
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
.align	5
@ Undefined instruction taken while in SVC mode.
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]			@ r4 = fixed-up fault pc (see svc_entry)
#else
	mov	r1, #2				@ default PC correction: Thumb (16-bit)
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	blo	__und_svc_fault
	ldrh	r9, [r4]			@ bottom 16 bits
	add	r4, r4, #2
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16		@ assemble full 32-bit Thumb opcode
#endif
	adr	r9, BSYM(__und_svc_finish)	@ emulation success return address
	mov	r2, r4
	bl	call_fpe

	mov	r1, #4				@ PC correction to apply
__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
.align	5
@ Prefetch abort taken while in SVC mode.
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
.align	5
@ Literal pool: addresses of kernel variables reachable with a
@ pc-relative ldr from the handlers above/below.
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor		@ per-CPU processor vector table
#endif
.LCfp:
	.word	fp_enter
					
						
							|  |  |  | 
 | 
					
						
/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 	.macro	usr_entry
 | 
					
						
							| 
									
										
										
										
											2009-02-16 11:42:09 +01:00
										 |  |  |  UNWIND(.fnstart	) | 
					
						
							|  |  |  |  UNWIND(.cantunwind	)	@ don't unwind the user space
 | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 	sub	sp, sp, #S_FRAME_SIZE | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  |  ARM(	stmib	sp, {r1 - r12}	) | 
					
						
							|  |  |  |  THUMB(	stmia	sp, {r0 - r12}	) | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-06-25 15:44:20 +01:00
										 |  |  | 	ldmia	r0, {r3 - r5} | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 	add	r0, sp, #S_PC		@ here for interlock avoidance | 
					
						
							| 
									
										
										
										
											2011-06-25 15:44:20 +01:00
										 |  |  | 	mov	r6, #-1			@  ""  ""     ""        ""
 | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-06-25 15:44:20 +01:00
										 |  |  | 	str	r3, [sp]		@ save the "real" r0 copied
 | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 					@ from the exception stack
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	@
 | 
					
						
							|  |  |  | 	@ We are now ready to fill in the remaining blanks on the stack:
 | 
					
						
							|  |  |  | 	@
 | 
					
						
							| 
									
										
										
										
											2011-06-25 15:44:20 +01:00
										 |  |  | 	@  r4 - lr_<exception>, already fixed up for correct return/restart
 | 
					
						
							|  |  |  | 	@  r5 - spsr_<exception>
 | 
					
						
							|  |  |  | 	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	@
 | 
					
						
							|  |  |  | 	@ Also, separately save sp_usr and lr_usr
 | 
					
						
							|  |  |  | 	@
 | 
					
						
							| 
									
										
										
										
											2011-06-25 15:44:20 +01:00
										 |  |  | 	stmia	r0, {r4 - r6} | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  |  ARM(	stmdb	r0, {sp, lr}^			) | 
					
						
							|  |  |  |  THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	) | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	@
 | 
					
						
							|  |  |  | 	@ Enable the alignment trap while in kernel mode
 | 
					
						
							|  |  |  | 	@
 | 
					
						
							| 
									
										
										
										
											2005-05-31 18:02:00 +01:00
										 |  |  | 	alignment_trap r0 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	@
 | 
					
						
							|  |  |  | 	@ Clear FP to mark the first stack frame
 | 
					
						
							|  |  |  | 	@
 | 
					
						
							|  |  |  | 	zero_fp | 
					
						
							| 
									
										
										
										
											2011-06-25 17:35:19 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  | #ifdef CONFIG_IRQSOFF_TRACER | 
					
						
							|  |  |  | 	bl	trace_hardirqs_off | 
					
						
							|  |  |  | #endif | 
					
						
							| 
									
										
										
										
											2013-03-28 22:54:40 +01:00
										 |  |  | 	ct_user_exit save = 0 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	.endm | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | 	.macro	kuser_cmpxchg_check
 | 
					
						
							| 
									
										
										
										
											2013-08-06 09:48:42 +01:00
										 |  |  | #if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \ | 
					
						
							|  |  |  |     !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | #ifndef CONFIG_MMU | 
					
						
							|  |  |  | #warning "NPTL on non MMU needs fixing" | 
					
						
							|  |  |  | #else | 
					
						
							|  |  |  | 	@ Make sure our user space atomic helper is restarted
 | 
					
						
							|  |  |  | 	@ if it was interrupted in a critical region.  Here we
 | 
					
						
							|  |  |  | 	@ perform a quick test inline since it should be false
 | 
					
						
							|  |  |  | 	@ 99.9999% of the time.  The rest is done out of line.
 | 
					
						
							| 
									
										
										
										
											2011-06-25 15:44:20 +01:00
										 |  |  | 	cmp	r4, #TASK_SIZE | 
					
						
							| 
									
										
										
										
											2011-06-19 23:36:03 -04:00
										 |  |  | 	blhs	kuser_cmpxchg64_fixup | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | #endif | 
					
						
							|  |  |  | #endif | 
					
						
							|  |  |  | 	.endm | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	.align	5
 | 
					
						
							|  |  |  | __dabt_usr: | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 	usr_entry | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | 	kuser_cmpxchg_check | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	mov	r2, sp | 
					
						
							| 
									
										
										
										
											2011-06-26 16:01:26 +01:00
										 |  |  | 	dabt_helper | 
					
						
							|  |  |  | 	b	ret_from_exception | 
					
						
							| 
									
										
										
										
											2009-02-16 11:42:09 +01:00
										 |  |  |  UNWIND(.fnend		) | 
					
						
							| 
									
										
										
										
											2008-08-28 11:22:32 +01:00
										 |  |  | ENDPROC(__dabt_usr) | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	.align	5
 | 
					
						
							|  |  |  | __irq_usr: | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 	usr_entry | 
					
						
							| 
									
										
										
										
											2011-06-25 18:28:19 +01:00
										 |  |  | 	kuser_cmpxchg_check | 
					
						
							| 
									
										
										
										
											2005-05-21 18:14:44 +01:00
										 |  |  | 	irq_handler | 
					
						
							| 
									
										
										
										
											2011-06-25 10:57:57 +01:00
										 |  |  | 	get_thread_info tsk | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	mov	why, #0 | 
					
						
							| 
									
										
										
										
											2011-06-05 02:24:58 +01:00
										 |  |  | 	b	ret_to_user_from_irq | 
					
						
							| 
									
										
										
										
											2009-02-16 11:42:09 +01:00
										 |  |  |  UNWIND(.fnend		) | 
					
						
							| 
									
										
										
										
											2008-08-28 11:22:32 +01:00
										 |  |  | ENDPROC(__irq_usr) | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	.ltorg | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	.align	5
 | 
					
						
							|  |  |  | __und_usr: | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 	usr_entry | 
					
						
							| 
									
										
										
										
											2011-06-25 18:28:19 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-06-25 15:44:20 +01:00
										 |  |  | 	mov	r2, r4 | 
					
						
							|  |  |  | 	mov	r3, r5 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  | 	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
 | 
					
						
							|  |  |  | 	@      faulting instruction depending on Thumb mode.
 | 
					
						
							|  |  |  | 	@ r3 = regs->ARM_cpsr
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	@
 | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  | 	@ The emulation code returns using r9 if it has emulated the
 | 
					
						
							|  |  |  | 	@ instruction, or the more conventional lr if we are to treat
 | 
					
						
							|  |  |  | 	@ this as a real undefined instruction
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	@
 | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  | 	adr	r9, BSYM(ret_from_exception) | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-04-18 22:43:08 +01:00
										 |  |  | 	tst	r3, #PSR_T_BIT			@ Thumb mode? | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  | 	bne	__und_usr_thumb | 
					
						
							|  |  |  | 	sub	r4, r2, #4			@ ARM instr at LR - 4
 | 
					
						
							|  |  |  | 1:	ldrt	r0, [r4] | 
					
						
							| 
									
										
										
										
											2013-02-12 18:59:57 +00:00
										 |  |  |  ARM_BE8(rev	r0, r0)				@ little endian instruction
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  | 	@ r0 = 32-bit ARM instruction which caused the exception
 | 
					
						
							|  |  |  | 	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
 | 
					
						
							|  |  |  | 	@ r4 = PC value for the faulting instruction
 | 
					
						
							|  |  |  | 	@ lr = 32-bit undefined instruction function
 | 
					
						
							|  |  |  | 	adr	lr, BSYM(__und_usr_fault_32) | 
					
						
							|  |  |  | 	b	call_fpe | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | __und_usr_thumb: | 
					
						
							| 
									
										
										
										
											2008-04-18 22:43:08 +01:00
										 |  |  | 	@ Thumb instruction
 | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  | 	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
 | 
					
						
							| 
									
										
										
										
											2011-08-19 18:00:08 +01:00
										 |  |  | #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7 | 
					
						
							|  |  |  | /* | 
					
						
							|  |  |  |  * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms | 
					
						
							|  |  |  |  * can never be supported in a single kernel, this code is not applicable at | 
					
						
							|  |  |  |  * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be | 
					
						
							|  |  |  |  * made about .arch directives. | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | #if __LINUX_ARM_ARCH__ < 7 | 
					
						
							|  |  |  | /* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */ | 
					
						
							|  |  |  | #define NEED_CPU_ARCHITECTURE | 
					
						
							|  |  |  | 	ldr	r5, .LCcpu_architecture | 
					
						
							|  |  |  | 	ldr	r5, [r5] | 
					
						
							|  |  |  | 	cmp	r5, #CPU_ARCH_ARMv7 | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  | 	blo	__und_usr_fault_16		@ 16bit undefined instruction
 | 
					
						
							| 
									
										
										
										
											2011-08-19 18:00:08 +01:00
										 |  |  | /* | 
					
						
							|  |  |  |  * The following code won't get run unless the running CPU really is v7, so | 
					
						
							|  |  |  |  * coding round the lack of ldrht on older arches is pointless.  Temporarily | 
					
						
							|  |  |  |  * override the assembler target arch with the minimum required instead: | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | 	.arch	armv6t2
 | 
					
						
							|  |  |  | #endif | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  | 2:	ldrht	r5, [r4] | 
					
						
							| 
									
										
										
										
											2011-08-19 17:59:27 +01:00
										 |  |  | 	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
 | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  | 	blo	__und_usr_fault_16		@ 16bit undefined instruction
 | 
					
						
							|  |  |  | 3:	ldrht	r0, [r2] | 
					
						
							| 
									
										
										
										
											2008-04-18 22:43:08 +01:00
										 |  |  | 	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
 | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  | 	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update | 
					
						
							| 
									
										
										
										
											2008-04-18 22:43:08 +01:00
										 |  |  | 	orr	r0, r0, r5, lsl #16 | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  | 	adr	lr, BSYM(__und_usr_fault_32) | 
					
						
							|  |  |  | 	@ r0 = the two 16-bit Thumb instructions which caused the exception
 | 
					
						
							|  |  |  | 	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
 | 
					
						
							|  |  |  | 	@ r4 = PC value for the first 16-bit Thumb instruction
 | 
					
						
							|  |  |  | 	@ lr = 32bit undefined instruction function
 | 
					
						
							| 
									
										
										
										
											2011-08-19 18:00:08 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  | #if __LINUX_ARM_ARCH__ < 7 | 
					
						
							|  |  |  | /* If the target arch was overridden, change it back: */ | 
					
						
							|  |  |  | #ifdef CONFIG_CPU_32v6K | 
					
						
							|  |  |  | 	.arch	armv6k
 | 
					
						
							| 
									
										
										
										
											2008-04-18 22:43:08 +01:00
										 |  |  | #else | 
					
						
							| 
									
										
										
										
											2011-08-19 18:00:08 +01:00
										 |  |  | 	.arch	armv6
 | 
					
						
							|  |  |  | #endif | 
					
						
							|  |  |  | #endif /* __LINUX_ARM_ARCH__ < 7 */ | 
					
						
							|  |  |  | #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */ | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  | 	b	__und_usr_fault_16 | 
					
						
							| 
									
										
										
										
											2008-04-18 22:43:08 +01:00
										 |  |  | #endif | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  |  UNWIND(.fnend) | 
					
						
							| 
									
										
										
										
											2008-08-28 11:22:32 +01:00
										 |  |  | ENDPROC(__und_usr) | 
					
						
							| 
									
										
										
										
											2008-04-18 22:43:08 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | /* | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  |  * The out of line fixup for the ldrt instructions above. | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  |  */ | 
					
						
							| 
									
										
										
										
											2010-04-19 10:15:03 +01:00
										 |  |  | 	.pushsection .fixup, "ax" | 
					
						
							| 
									
										
										
										
											2012-06-15 16:49:58 +01:00
										 |  |  | 	.align	2
 | 
					
						
							| 
									
										
										
										
											2008-04-18 22:43:08 +01:00
										 |  |  | 4:	mov	pc, r9 | 
					
						
							| 
									
										
										
										
											2010-04-19 10:15:03 +01:00
										 |  |  | 	.popsection | 
					
						
							|  |  |  | 	.pushsection __ex_table,"a" | 
					
						
							| 
									
										
										
										
											2008-04-18 22:43:08 +01:00
										 |  |  | 	.long	1b, 4b | 
					
						
							| 
									
										
										
										
											2011-11-22 23:42:12 +01:00
										 |  |  | #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7 | 
					
						
							| 
									
										
										
										
											2008-04-18 22:43:08 +01:00
										 |  |  | 	.long	2b, 4b | 
					
						
							|  |  |  | 	.long	3b, 4b | 
					
						
							|  |  |  | #endif | 
					
						
							| 
									
										
										
										
											2010-04-19 10:15:03 +01:00
										 |  |  | 	.popsection | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | /* | 
					
						
							|  |  |  |  * Check whether the instruction is a co-processor instruction. | 
					
						
							|  |  |  |  * If yes, we need to call the relevant co-processor handler. | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  * Note that we don't do a full check here for the co-processor | 
					
						
							|  |  |  |  * instructions; all instructions with bit 27 set are well
 | 
					
						
							|  |  |  |  * defined.  The only instructions that should fault are the | 
					
						
							|  |  |  |  * co-processor instructions.  However, we have to watch out | 
					
						
							|  |  |  |  * for the ARM6/ARM7 SWI bug. | 
					
						
							|  |  |  |  * | 
					
						
							| 
									
										
										
										
											2008-01-10 19:16:17 +01:00
										 |  |  |  * NEON is a special case that has to be handled here. Not all | 
					
						
							|  |  |  |  * NEON instructions are co-processor instructions, so we have | 
					
						
							|  |  |  |  * to make a special case of checking for them. Plus, there's | 
					
						
							|  |  |  |  * five groups of them, so we have a table of mask/opcode pairs | 
					
						
							|  |  |  |  * to check against, and if any match then we branch off into the | 
					
						
							|  |  |  |  * NEON handler code. | 
					
						
							|  |  |  |  * | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  |  * Emulators may wish to make use of the following registers: | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  |  *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb) | 
					
						
							|  |  |  |  *  r2  = PC value to resume execution after successful emulation | 
					
						
							| 
									
										
										
										
											2007-01-06 22:53:48 +00:00
										 |  |  |  *  r9  = normal "successful" return address | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  |  *  r10 = this threads thread_info structure | 
					
						
							| 
									
										
										
										
											2007-01-06 22:53:48 +00:00
										 |  |  |  *  lr  = unrecognised instruction return address | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
										 |  |  |  * IRQs disabled, FIQs enabled. | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  |  */ | 
					
						
							| 
									
										
										
										
											2008-04-18 22:43:08 +01:00
										 |  |  | 	@
 | 
					
						
							|  |  |  | 	@ Fall-through from Thumb-2 __und_usr
 | 
					
						
							|  |  |  | 	@
 | 
					
						
							|  |  |  | #ifdef CONFIG_NEON | 
					
						
							| 
									
										
										
										
											2013-02-23 17:53:52 +00:00
										 |  |  | 	get_thread_info r10			@ get current thread
 | 
					
						
							| 
									
										
										
										
											2008-04-18 22:43:08 +01:00
										 |  |  | 	adr	r6, .LCneon_thumb_opcodes | 
					
						
							|  |  |  | 	b	2f | 
					
						
							|  |  |  | #endif | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | call_fpe: | 
					
						
							| 
									
										
										
										
											2013-02-23 17:53:52 +00:00
										 |  |  | 	get_thread_info r10			@ get current thread
 | 
					
						
							| 
									
										
										
										
											2008-01-10 19:16:17 +01:00
										 |  |  | #ifdef CONFIG_NEON | 
					
						
							| 
									
										
										
										
											2008-04-18 22:43:08 +01:00
										 |  |  | 	adr	r6, .LCneon_arm_opcodes | 
					
						
							| 
									
										
										
										
											2013-02-23 17:53:52 +00:00
										 |  |  | 2:	ldr	r5, [r6], #4			@ mask value
 | 
					
						
							| 
									
										
										
										
											2008-01-10 19:16:17 +01:00
										 |  |  | 	ldr	r7, [r6], #4			@ opcode bits matching in mask
 | 
					
						
							| 
									
										
										
										
											2013-02-23 17:53:52 +00:00
										 |  |  | 	cmp	r5, #0				@ end mask?
 | 
					
						
							|  |  |  | 	beq	1f | 
					
						
							|  |  |  | 	and	r8, r0, r5 | 
					
						
							| 
									
										
										
										
											2008-01-10 19:16:17 +01:00
										 |  |  | 	cmp	r8, r7				@ NEON instruction?
 | 
					
						
							|  |  |  | 	bne	2b | 
					
						
							|  |  |  | 	mov	r7, #1 | 
					
						
							|  |  |  | 	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
 | 
					
						
							|  |  |  | 	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
 | 
					
						
							|  |  |  | 	b	do_vfp				@ let VFP handler handle this
 | 
					
						
							|  |  |  | 1: | 
					
						
							|  |  |  | #endif | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
 | 
					
						
							| 
									
										
										
										
											2008-04-18 22:43:08 +01:00
										 |  |  | 	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	moveq	pc, lr | 
					
						
							|  |  |  | 	and	r8, r0, #0x00000f00		@ mask out CP number
 | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  |  THUMB(	lsr	r8, r8, #8		) | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	mov	r7, #1 | 
					
						
							|  |  |  | 	add	r6, r10, #TI_USED_CP | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  |  ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 | 
					
						
							|  |  |  |  THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | #ifdef CONFIG_IWMMXT | 
					
						
							|  |  |  | 	@ Test if we need to give access to iWMMXt coprocessors
 | 
					
						
							|  |  |  | 	ldr	r5, [r10, #TI_FLAGS] | 
					
						
							|  |  |  | 	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
 | 
					
						
							|  |  |  | 	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1) | 
					
						
							|  |  |  | 	bcs	iwmmxt_task_enable | 
					
						
							|  |  |  | #endif | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  |  ARM(	add	pc, pc, r8, lsr #6	) | 
					
						
							|  |  |  |  THUMB(	lsl	r8, r8, #2		) | 
					
						
							|  |  |  |  THUMB(	add	pc, r8			) | 
					
						
							|  |  |  | 	nop | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2009-10-12 17:31:20 +01:00
										 |  |  | 	movw_pc	lr				@ CP#0
 | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  | 	W(b)	do_fpe				@ CP#1 (FPE)
 | 
					
						
							|  |  |  | 	W(b)	do_fpe				@ CP#2 (FPE)
 | 
					
						
							| 
									
										
										
										
											2009-10-12 17:31:20 +01:00
										 |  |  | 	movw_pc	lr				@ CP#3
 | 
					
						
							| 
									
										
										
										
											2006-06-27 23:03:03 +01:00
										 |  |  | #ifdef CONFIG_CRUNCH | 
					
						
							|  |  |  | 	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
 | 
					
						
							|  |  |  | 	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
 | 
					
						
							|  |  |  | 	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
 | 
					
						
							|  |  |  | #else | 
					
						
							| 
									
										
										
										
											2009-10-12 17:31:20 +01:00
										 |  |  | 	movw_pc	lr				@ CP#4
 | 
					
						
							|  |  |  | 	movw_pc	lr				@ CP#5
 | 
					
						
							|  |  |  | 	movw_pc	lr				@ CP#6
 | 
					
						
							| 
									
										
										
										
											2006-06-27 23:03:03 +01:00
										 |  |  | #endif | 
					
						
							| 
									
										
										
										
											2009-10-12 17:31:20 +01:00
										 |  |  | 	movw_pc	lr				@ CP#7
 | 
					
						
							|  |  |  | 	movw_pc	lr				@ CP#8
 | 
					
						
							|  |  |  | 	movw_pc	lr				@ CP#9
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | #ifdef CONFIG_VFP | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  | 	W(b)	do_vfp				@ CP#10 (VFP)
 | 
					
						
							|  |  |  | 	W(b)	do_vfp				@ CP#11 (VFP)
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | #else | 
					
						
							| 
									
										
										
										
											2009-10-12 17:31:20 +01:00
										 |  |  | 	movw_pc	lr				@ CP#10 (VFP)
 | 
					
						
							|  |  |  | 	movw_pc	lr				@ CP#11 (VFP)
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | #endif | 
					
						
							| 
									
										
										
										
											2009-10-12 17:31:20 +01:00
										 |  |  | 	movw_pc	lr				@ CP#12
 | 
					
						
							|  |  |  | 	movw_pc	lr				@ CP#13
 | 
					
						
							|  |  |  | 	movw_pc	lr				@ CP#14 (Debug)
 | 
					
						
							|  |  |  | 	movw_pc	lr				@ CP#15 (Control)
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-08-19 18:00:08 +01:00
#ifdef NEED_CPU_ARCHITECTURE
	.align	2			@ keep the literal word 4-byte aligned
.LCcpu_architecture:
	.word	__cpu_architecture	@ address of the cached CPU architecture
					@ variable, loaded PC-relative by code
					@ elsewhere in this file
#endif
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-01-10 19:16:17 +01:00
#ifdef CONFIG_NEON
	.align	6

/*
 * (mask, opcode) pairs used to recognise NEON instructions: an undefined
 * instruction matches an entry when (insn & mask) == opcode.  Each table
 * is terminated by an all-zero pair.  Presumably consulted by the
 * undefined-instruction path (matcher not visible in this chunk).
 */
.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask   (terminator)
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:				@ Thumb-2 encodings of the same classes
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask   (terminator)
	.word	0x00000000			@ opcode
#endif
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
@ Hand an undefined FP instruction to the FP emulation/support module.
@ Re-enables IRQs, points r10 at the thread's FP state area, then jumps
@ through the fp_enter pointer (initially no_fp; a loaded FP module is
@ expected to rewrite it — see the .data word below).
do_fpe:
	enable_irq
	ldr	r4, .LCfp			@ r4 = &fp_enter (literal pool, defined elsewhere)
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2010-04-30 10:45:46 +01:00
	.pushsection .data
@ fp_enter: writable function pointer jumped through by do_fpe above.
@ Defaults to no_fp (reject the instruction); a floating-point emulator
@ can overwrite it with its own USR entry point.
ENTRY(fp_enter)
	.word	no_fp
	.popsection
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2009-09-18 23:27:07 +01:00
@ Default fp_enter target: no FP support present, simply return to the
@ "unrecognised instruction" address the caller left in lr.
ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)
					
						
							| 
									
										
										
										
											2007-01-06 22:53:48 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-07-30 19:42:10 +01:00
/*
 * Deliver an undefined-instruction fault for user mode.
 * r1 = size in bytes of the faulting opcode: 4 (32-bit) or 2 (16-bit Thumb).
 * Tail-calls __und_fault with r0 = pt_regs and lr set so that __und_fault
 * returns through ret_from_exception.
 */
__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16:
	mov	r1, #2
1:	enable_irq
	mov	r0, sp				@ r0 = pt_regs on the kernel stack
	adr	lr, BSYM(ret_from_exception)	@ BSYM: keep Thumb bit correct on T2 kernels
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
	.align	5
@ Prefetch abort taken from user mode: build the usr_entry frame and call
@ the per-CPU abort helper with r2 = pt_regs, then fall through into the
@ common user-mode exception return path.
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0				@ not a syscall return
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | /* | 
					
						
							|  |  |  |  * Register switch for ARMv3 and ARMv4 processors | 
					
						
							|  |  |  |  * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info | 
					
						
							|  |  |  |  * previous and next are guaranteed not to be the same. | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | ENTRY(__switch_to) | 
					
						
							| 
									
										
										
										
											2009-02-16 11:42:09 +01:00
										 |  |  |  UNWIND(.fnstart	) | 
					
						
							|  |  |  |  UNWIND(.cantunwind	) | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	add	ip, r1, #TI_CPU_SAVE | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  |  ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 | 
					
						
							|  |  |  |  THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 | 
					
						
							|  |  |  |  THUMB(	str	sp, [ip], #4		   ) | 
					
						
							|  |  |  |  THUMB(	str	lr, [ip], #4		   ) | 
					
						
							| 
									
										
										
										
											2013-06-18 23:23:26 +01:00
										 |  |  | 	ldr	r4, [r2, #TI_TP_VALUE] | 
					
						
							|  |  |  | 	ldr	r5, [r2, #TI_TP_VALUE + 4] | 
					
						
							| 
									
										
										
										
											2010-09-13 16:03:21 +01:00
										 |  |  | #ifdef CONFIG_CPU_USE_DOMAINS | 
					
						
							| 
									
										
										
										
											2006-06-21 13:31:52 +01:00
										 |  |  | 	ldr	r6, [r2, #TI_CPU_DOMAIN] | 
					
						
							| 
									
										
										
										
											2006-01-13 21:05:25 +00:00
										 |  |  | #endif | 
					
						
							| 
									
										
										
										
											2013-06-18 23:23:26 +01:00
										 |  |  | 	switch_tls r1, r4, r5, r3, r7 | 
					
						
							| 
									
										
										
										
											2010-06-07 21:50:33 -04:00
										 |  |  | #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) | 
					
						
							|  |  |  | 	ldr	r7, [r2, #TI_TASK] | 
					
						
							|  |  |  | 	ldr	r8, =__stack_chk_guard | 
					
						
							|  |  |  | 	ldr	r7, [r7, #TSK_STACK_CANARY] | 
					
						
							|  |  |  | #endif | 
					
						
							| 
									
										
										
										
											2010-09-13 16:03:21 +01:00
										 |  |  | #ifdef CONFIG_CPU_USE_DOMAINS | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 | 
					
						
							|  |  |  | #endif | 
					
						
							| 
									
										
										
										
											2006-06-21 13:31:52 +01:00
										 |  |  | 	mov	r5, r0 | 
					
						
							|  |  |  | 	add	r4, r2, #TI_CPU_SAVE | 
					
						
							|  |  |  | 	ldr	r0, =thread_notify_head | 
					
						
							|  |  |  | 	mov	r1, #THREAD_NOTIFY_SWITCH | 
					
						
							|  |  |  | 	bl	atomic_notifier_call_chain | 
					
						
							| 
									
										
										
										
											2010-06-07 21:50:33 -04:00
										 |  |  | #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) | 
					
						
							|  |  |  | 	str	r7, [r8] | 
					
						
							|  |  |  | #endif | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  |  THUMB(	mov	ip, r4			   ) | 
					
						
							| 
									
										
										
										
											2006-06-21 13:31:52 +01:00
										 |  |  | 	mov	r0, r5 | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  |  ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 | 
					
						
							|  |  |  |  THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 | 
					
						
							|  |  |  |  THUMB(	ldr	sp, [ip], #4		   ) | 
					
						
							|  |  |  |  THUMB(	ldr	pc, [ip]		   ) | 
					
						
							| 
									
										
										
										
											2009-02-16 11:42:09 +01:00
										 |  |  |  UNWIND(.fnend		) | 
					
						
							| 
									
										
										
										
											2008-08-28 11:22:32 +01:00
										 |  |  | ENDPROC(__switch_to) | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
 THUMB(	.arm	)	@ helpers are assembled as ARM code even in Thumb-2 kernels
					
						
							| 
									
										
										
										
											2005-04-29 22:08:33 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2006-08-18 17:20:15 +01:00
	@ Return to the user-provided address in \reg.  Use bx when Thumb
	@ userspace is possible so the T bit in the address is honoured;
	@ otherwise a plain mov to pc suffices.
	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2013-07-04 11:32:04 +01:00
	@ Pad the helper starting at \sym out to exactly \size bytes:
	@ first byte-pad to a 4-byte boundary, then fill the remainder with
	@ 0xe7fddef1 — an ARM encoding guaranteed to be undefined, so a
	@ stray jump into the padding traps instead of executing garbage.
	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2013-07-23 18:37:00 +01:00
#ifdef CONFIG_KUSER_HELPERS
	.align	5		@ 32-byte alignment: the kuser slot granularity
	.globl	__kuser_helper_start
__kuser_helper_start:		@ start of the region copied into the vectors page
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-12-19 22:20:51 +00:00
										 |  |  | /* | 
					
						
							| 
									
										
										
										
											2011-06-19 23:36:03 -04:00
										 |  |  |  * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular | 
					
						
							|  |  |  |  * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point. | 
					
						
							| 
									
										
										
										
											2005-12-19 22:20:51 +00:00
										 |  |  |  */ | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-06-19 23:36:03 -04:00
										 |  |  | __kuser_cmpxchg64:				@ 0xffff0f60
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | #if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	/* | 
					
						
							|  |  |  | 	 * Poor you.  No fast solution possible... | 
					
						
							|  |  |  | 	 * The kernel itself must perform the operation. | 
					
						
							|  |  |  | 	 * A special ghost syscall is used for that (see traps.c). | 
					
						
							|  |  |  | 	 */ | 
					
						
							|  |  |  | 	stmfd	sp!, {r7, lr} | 
					
						
							|  |  |  | 	ldr	r7, 1f			@ it's 20 bits
 | 
					
						
							|  |  |  | 	swi	__ARM_NR_cmpxchg64 | 
					
						
							|  |  |  | 	ldmfd	sp!, {r7, pc} | 
					
						
							|  |  |  | 1:	.word	__ARM_NR_cmpxchg64 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | #elif defined(CONFIG_CPU_32v6K) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	stmfd	sp!, {r4, r5, r6, r7} | 
					
						
							|  |  |  | 	ldrd	r4, r5, [r0]			@ load old val
 | 
					
						
							|  |  |  | 	ldrd	r6, r7, [r1]			@ load new val
 | 
					
						
							|  |  |  | 	smp_dmb	arm | 
					
						
							|  |  |  | 1:	ldrexd	r0, r1, [r2]			@ load current val
 | 
					
						
							|  |  |  | 	eors	r3, r0, r4			@ compare with oldval (1)
 | 
					
						
							|  |  |  | 	eoreqs	r3, r1, r5			@ compare with oldval (2)
 | 
					
						
							|  |  |  | 	strexdeq r3, r6, r7, [r2]		@ store newval if eq
 | 
					
						
							|  |  |  | 	teqeq	r3, #1				@ success?
 | 
					
						
							|  |  |  | 	beq	1b				@ if no then retry
 | 
					
						
							| 
									
										
											  
											
												ARM: 6516/1: Allow SMP_ON_UP to work with Thumb-2 kernels.
  * __fixup_smp_on_up has been modified with support for the
    THUMB2_KERNEL case.  For THUMB2_KERNEL only, fixups are split
    into halfwords in case of misalignment, since we can't rely on
    unaligned accesses working before turning the MMU on.
    No attempt is made to optimise the aligned case, since the
    number of fixups is typically small, and it seems best to keep
    the code as simple as possible.
  * Add a rotate in the fixup_smp code in order to support
    CPU_BIG_ENDIAN, as suggested by Nicolas Pitre.
  * Add an assembly-time sanity-check to ALT_UP() to ensure that
    the content really is the right size (4 bytes).
    (No check is done for ALT_SMP().  Possibly, this could be fixed
    by splitting the two uses ot ALT_SMP() (ALT_SMP...SMP_UP versus
    ALT_SMP...SMP_UP_B) into two macros.  In the first case,
    ALT_SMP needs to expand to >= 4 bytes, not == 4.)
  * smp_mpidr.h (which implements ALT_SMP()/ALT_UP() manually due
    to macro limitations) has not been modified: the affected
    instruction (mov) has no 16-bit encoding, so the correct
    instruction size is satisfied in this case.
  * A "mode" parameter has been added to smp_dmb:
    smp_dmb arm @ assumes 4-byte instructions (for ARM code, e.g. kuser)
    smp_dmb     @ uses W() to ensure 4-byte instructions for ALT_SMP()
    This avoids assembly failures due to use of W() inside smp_dmb,
    when assembling pure-ARM code in the vectors page.
    There might be a better way to achieve this.
  * Kconfig: make SMP_ON_UP depend on
    (!THUMB2_KERNEL || !BIG_ENDIAN) i.e., THUMB2_KERNEL is now
    supported, but only if !BIG_ENDIAN (The fixup code for Thumb-2
    currently assumes little-endian order.)
Tested using a single generic realview kernel on:
	ARM RealView PB-A8 (CONFIG_THUMB2_KERNEL={n,y})
	ARM RealView PBX-A9 (SMP)
Signed-off-by: Dave Martin <dave.martin@linaro.org>
Acked-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2010-12-01 15:39:23 +01:00
										 |  |  | 	smp_dmb	arm | 
					
						
							| 
									
										
										
										
											2011-06-19 23:36:03 -04:00
										 |  |  | 	rsbs	r0, r3, #0			@ set returned val and C flag
 | 
					
						
							|  |  |  | 	ldmfd	sp!, {r4, r5, r6, r7} | 
					
						
							| 
									
										
										
										
											2012-02-03 11:08:05 +01:00
										 |  |  | 	usr_ret	lr | 
					
						
							| 
									
										
										
										
											2011-06-19 23:36:03 -04:00
										 |  |  | 
 | 
					
						
							|  |  |  | #elif !defined(CONFIG_SMP) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | #ifdef CONFIG_MMU | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	/* | 
					
						
							|  |  |  | 	 * The only thing that can break atomicity in this cmpxchg64 | 
					
						
							|  |  |  | 	 * implementation is either an IRQ or a data abort exception | 
					
						
							|  |  |  | 	 * causing another process/thread to be scheduled in the middle of | 
					
						
							|  |  |  | 	 * the critical sequence.  The same strategy as for cmpxchg is used. | 
					
						
							|  |  |  | 	 */ | 
					
						
							|  |  |  | 	stmfd	sp!, {r4, r5, r6, lr} | 
					
						
							|  |  |  | 	ldmia	r0, {r4, r5}			@ load old val
 | 
					
						
							|  |  |  | 	ldmia	r1, {r6, lr}			@ load new val
 | 
					
						
							|  |  |  | 1:	ldmia	r2, {r0, r1}			@ load current val
 | 
					
						
							|  |  |  | 	eors	r3, r0, r4			@ compare with oldval (1)
 | 
					
						
							|  |  |  | 	eoreqs	r3, r1, r5			@ compare with oldval (2)
 | 
					
						
							|  |  |  | 2:	stmeqia	r2, {r6, lr}			@ store newval if eq
 | 
					
						
							|  |  |  | 	rsbs	r0, r3, #0			@ set return val and C flag
 | 
					
						
							|  |  |  | 	ldmfd	sp!, {r4, r5, r6, pc} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	.text | 
					
						
							|  |  |  | kuser_cmpxchg64_fixup: | 
					
						
							|  |  |  | 	@ Called from kuser_cmpxchg_fixup.
 | 
					
						
							| 
									
										
										
										
											2011-07-22 23:09:07 +01:00
										 |  |  | 	@ r4 = address of interrupted insn (must be preserved).
 | 
					
						
							| 
									
										
										
										
											2011-06-19 23:36:03 -04:00
										 |  |  | 	@ sp = saved regs. r7 and r8 are clobbered.
 | 
					
						
							|  |  |  | 	@ 1b = first critical insn, 2b = last critical insn.
 | 
					
						
							| 
									
										
										
										
											2011-07-22 23:09:07 +01:00
										 |  |  | 	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
 | 
					
						
							| 
									
										
										
										
											2011-06-19 23:36:03 -04:00
										 |  |  | 	mov	r7, #0xffff0fff | 
					
						
							|  |  |  | 	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64))) | 
					
						
							| 
									
										
										
										
											2011-07-22 23:09:07 +01:00
										 |  |  | 	subs	r8, r4, r7 | 
					
						
							| 
									
										
										
										
											2011-06-19 23:36:03 -04:00
										 |  |  | 	rsbcss	r8, r8, #(2b - 1b) | 
					
						
							|  |  |  | 	strcs	r7, [sp, #S_PC] | 
					
						
							|  |  |  | #if __LINUX_ARM_ARCH__ < 6 | 
					
						
							|  |  |  | 	bcc	kuser_cmpxchg32_fixup | 
					
						
							|  |  |  | #endif | 
					
						
							|  |  |  | 	mov	pc, lr | 
					
						
							|  |  |  | 	.previous | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | #else | 
					
						
							|  |  |  | #warning "NPTL on non MMU needs fixing" | 
					
						
							|  |  |  | 	mov	r0, #-1 | 
					
						
							|  |  |  | 	adds	r0, r0, #0 | 
					
						
							| 
									
										
										
										
											2006-08-18 17:20:15 +01:00
										 |  |  | 	usr_ret	lr | 
					
						
							| 
									
										
										
										
											2011-06-19 23:36:03 -04:00
										 |  |  | #endif | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | #else | 
					
						
							|  |  |  | #error "incoherent kernel configuration" | 
					
						
							|  |  |  | #endif | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2013-07-04 11:32:04 +01:00
										 |  |  | 	kuser_pad __kuser_cmpxchg64, 64 | 
					
						
							| 
									
										
										
										
											2005-12-19 22:20:51 +00:00
										 |  |  | 
 | 
					
						
@ Userspace-callable full memory barrier; "arm" mode forces 4-byte
@ encodings since the vectors page is pure ARM code.
__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32
					
						
							| 
									
										
										
										
											2005-04-29 22:08:33 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  | __kuser_cmpxchg:				@ 0xffff0fc0
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-06-08 19:00:47 +01:00
										 |  |  | #if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) | 
					
						
							| 
									
										
										
										
											2005-04-29 22:08:33 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-06-08 19:00:47 +01:00
										 |  |  | 	/* | 
					
						
							|  |  |  | 	 * Poor you.  No fast solution possible... | 
					
						
							|  |  |  | 	 * The kernel itself must perform the operation. | 
					
						
							|  |  |  | 	 * A special ghost syscall is used for that (see traps.c). | 
					
						
							|  |  |  | 	 */ | 
					
						
							| 
									
										
										
										
											2006-01-18 22:38:49 +00:00
										 |  |  | 	stmfd	sp!, {r7, lr} | 
					
						
							| 
									
										
										
										
											2010-12-01 18:12:43 +01:00
										 |  |  | 	ldr	r7, 1f			@ it's 20 bits
 | 
					
						
							| 
									
										
										
										
											2009-11-09 23:53:29 +00:00
										 |  |  | 	swi	__ARM_NR_cmpxchg | 
					
						
							| 
									
										
										
										
											2006-01-18 22:38:49 +00:00
										 |  |  | 	ldmfd	sp!, {r7, pc} | 
					
						
							| 
									
										
										
										
											2009-11-09 23:53:29 +00:00
										 |  |  | 1:	.word	__ARM_NR_cmpxchg | 
					
						
							| 
									
										
										
										
											2005-06-08 19:00:47 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  | #elif __LINUX_ARM_ARCH__ < 6 | 
					
						
							| 
									
										
										
										
											2005-04-29 22:08:33 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | #ifdef CONFIG_MMU | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-04-29 22:08:33 +01:00
										 |  |  | 	/* | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | 	 * The only thing that can break atomicity in this cmpxchg | 
					
						
							|  |  |  | 	 * implementation is either an IRQ or a data abort exception | 
					
						
							|  |  |  | 	 * causing another process/thread to be scheduled in the middle | 
					
						
							|  |  |  | 	 * of the critical sequence.  To prevent this, code is added to | 
					
						
							|  |  |  | 	 * the IRQ and data abort exception handlers to set the pc back | 
					
						
							|  |  |  | 	 * to the beginning of the critical section if it is found to be | 
					
						
							|  |  |  | 	 * within that critical section (see kuser_cmpxchg_fixup). | 
					
						
							| 
									
										
										
										
											2005-04-29 22:08:33 +01:00
										 |  |  | 	 */ | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | 1:	ldr	r3, [r2]			@ load current val
 | 
					
						
							|  |  |  | 	subs	r3, r3, r0			@ compare with oldval
 | 
					
						
							|  |  |  | 2:	streq	r1, [r2]			@ store newval if eq
 | 
					
						
							|  |  |  | 	rsbs	r0, r3, #0			@ set return val and C flag
 | 
					
						
							|  |  |  | 	usr_ret	lr | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	.text | 
					
						
							| 
									
										
										
										
											2011-06-19 23:36:03 -04:00
										 |  |  | kuser_cmpxchg32_fixup: | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | 	@ Called from kuser_cmpxchg_check macro.
 | 
					
						
							| 
									
										
										
										
											2011-06-25 15:44:20 +01:00
										 |  |  | 	@ r4 = address of interrupted insn (must be preserved).
 | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | 	@ sp = saved regs. r7 and r8 are clobbered.
 | 
					
						
							|  |  |  | 	@ 1b = first critical insn, 2b = last critical insn.
 | 
					
						
							| 
									
										
										
										
											2011-06-25 15:44:20 +01:00
										 |  |  | 	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
 | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | 	mov	r7, #0xffff0fff | 
					
						
							|  |  |  | 	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg))) | 
					
						
							| 
									
										
										
										
											2011-06-25 15:44:20 +01:00
										 |  |  | 	subs	r8, r4, r7 | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | 	rsbcss	r8, r8, #(2b - 1b) | 
					
						
							|  |  |  | 	strcs	r7, [sp, #S_PC] | 
					
						
							|  |  |  | 	mov	pc, lr | 
					
						
							|  |  |  | 	.previous | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2006-02-08 21:19:37 +00:00
										 |  |  | #else | 
					
						
							|  |  |  | #warning "NPTL on non MMU needs fixing" | 
					
						
							|  |  |  | 	mov	r0, #-1 | 
					
						
							|  |  |  | 	adds	r0, r0, #0 | 
					
						
							| 
									
										
										
										
											2006-08-18 17:20:15 +01:00
										 |  |  | 	usr_ret	lr | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | #endif | 
					
						
							| 
									
										
										
										
											2005-04-29 22:08:33 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  | #else | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
											  
											
												ARM: 6516/1: Allow SMP_ON_UP to work with Thumb-2 kernels.
  * __fixup_smp_on_up has been modified with support for the
    THUMB2_KERNEL case.  For THUMB2_KERNEL only, fixups are split
    into halfwords in case of misalignment, since we can't rely on
    unaligned accesses working before turning the MMU on.
    No attempt is made to optimise the aligned case, since the
    number of fixups is typically small, and it seems best to keep
    the code as simple as possible.
  * Add a rotate in the fixup_smp code in order to support
    CPU_BIG_ENDIAN, as suggested by Nicolas Pitre.
  * Add an assembly-time sanity-check to ALT_UP() to ensure that
    the content really is the right size (4 bytes).
    (No check is done for ALT_SMP().  Possibly, this could be fixed
    by splitting the two uses ot ALT_SMP() (ALT_SMP...SMP_UP versus
    ALT_SMP...SMP_UP_B) into two macros.  In the first case,
    ALT_SMP needs to expand to >= 4 bytes, not == 4.)
  * smp_mpidr.h (which implements ALT_SMP()/ALT_UP() manually due
    to macro limitations) has not been modified: the affected
    instruction (mov) has no 16-bit encoding, so the correct
    instruction size is satisfied in this case.
  * A "mode" parameter has been added to smp_dmb:
    smp_dmb arm @ assumes 4-byte instructions (for ARM code, e.g. kuser)
    smp_dmb     @ uses W() to ensure 4-byte instructions for ALT_SMP()
    This avoids assembly failures due to use of W() inside smp_dmb,
    when assembling pure-ARM code in the vectors page.
    There might be a better way to achieve this.
  * Kconfig: make SMP_ON_UP depend on
    (!THUMB2_KERNEL || !BIG_ENDIAN) i.e., THUMB2_KERNEL is now
    supported, but only if !BIG_ENDIAN (The fixup code for Thumb-2
    currently assumes little-endian order.)
Tested using a single generic realview kernel on:
	ARM RealView PB-A8 (CONFIG_THUMB2_KERNEL={n,y})
	ARM RealView PBX-A9 (SMP)
Signed-off-by: Dave Martin <dave.martin@linaro.org>
Acked-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2010-12-01 15:39:23 +01:00
										 |  |  | 	smp_dmb	arm | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | 1:	ldrex	r3, [r2] | 
					
						
							| 
									
										
										
										
											2005-04-29 22:08:33 +01:00
										 |  |  | 	subs	r3, r3, r0 | 
					
						
							|  |  |  | 	strexeq	r3, r1, [r2] | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | 	teqeq	r3, #1 | 
					
						
							|  |  |  | 	beq	1b | 
					
						
							| 
									
										
										
										
											2005-04-29 22:08:33 +01:00
										 |  |  | 	rsbs	r0, r3, #0 | 
					
						
							| 
									
										
											  
											
												[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  It however can produce spurious false negative if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
	#include <stdio.h>
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
	int main()
	{
		int i, x = 0;
		for (i = 0; i < 100000000; i++) {
			int v = x;
			if (__kernel_cmpxchg(v, v+1, &x))
				printf("failed at %d: %d vs %d\n", i, v, x);
		}
		printf("done with %d vs %d\n", i, x);
		return 0;
	}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
											
										 
											2007-11-20 17:20:29 +01:00
										 |  |  | 	/* beware -- each __kuser slot must be 8 instructions max */ | 
					
						
							| 
									
										
										
										
											2010-09-04 10:47:48 +01:00
										 |  |  | 	ALT_SMP(b	__kuser_memory_barrier) | 
					
						
							|  |  |  | 	ALT_UP(usr_ret	lr) | 
					
						
							| 
									
										
										
										
											2005-04-29 22:08:33 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  | #endif | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2013-07-04 11:32:04 +01:00
										 |  |  | 	kuser_pad __kuser_cmpxchg, 32 | 
					
						
							| 
									
										
										
										
											2005-04-29 22:08:33 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  | __kuser_get_tls:				@ 0xffff0fe0
 | 
					
						
							| 
									
										
										
										
											2010-07-05 14:53:10 +01:00
										 |  |  | 	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
 | 
					
						
							| 
									
										
										
										
											2006-08-18 17:20:15 +01:00
										 |  |  | 	usr_ret	lr | 
					
						
							| 
									
										
										
										
											2010-07-05 14:53:10 +01:00
										 |  |  | 	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
 | 
					
						
							| 
									
										
										
										
											2013-07-04 11:32:04 +01:00
										 |  |  | 	kuser_pad __kuser_get_tls, 16 | 
					
						
							|  |  |  | 	.rep	3
 | 
					
						
							| 
									
										
										
										
											2010-07-05 14:53:10 +01:00
										 |  |  | 	.word	0			@ 0xffff0ff0 software TLS value, then
 | 
					
						
							|  |  |  | 	.endr				@ pad up to __kuser_helper_version
 | 
					
						
							| 
									
										
										
										
											2005-04-29 22:08:33 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  | __kuser_helper_version:				@ 0xffff0ffc
 | 
					
						
							|  |  |  | 	.word	((__kuser_helper_end - __kuser_helper_start) >> 5) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	.globl	__kuser_helper_end
 | 
					
						
							|  |  |  | __kuser_helper_end: | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2013-07-23 18:37:00 +01:00
										 |  |  | #endif | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  |  THUMB(	.thumb	) | 
					
						
							| 
									
										
										
										
											2005-04-29 22:08:33 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | /* | 
					
						
							|  |  |  |  * Vector stubs. | 
					
						
							|  |  |  |  * | 
					
						
							| 
									
										
										
										
											2013-07-04 11:40:32 +01:00
										 |  |  |  * This code is copied to 0xffff1000 so we can use branches in the | 
					
						
							|  |  |  |  * vectors, rather than ldr's.  Note that this code must not exceed | 
					
						
							|  |  |  |  * a page size. | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  |  * | 
					
						
							|  |  |  |  * Common stub entry macro: | 
					
						
							|  |  |  |  *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  |  * | 
					
						
							|  |  |  |  * SP points to a minimal amount of processor-private memory, the address | 
					
						
							|  |  |  |  * of which is copied into r0 for the mode specific abort handler. | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  |  */ | 
					
						
							| 
									
										
										
										
											2005-11-06 14:42:37 +00:00
										 |  |  | 	.macro	vector_stub, name, mode, correction=0 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	.align	5
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | vector_\name: | 
					
						
							|  |  |  | 	.if \correction | 
					
						
							|  |  |  | 	sub	lr, lr, #\correction | 
					
						
							|  |  |  | 	.endif | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	@
 | 
					
						
							|  |  |  | 	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
 | 
					
						
							|  |  |  | 	@ (parent CPSR)
 | 
					
						
							|  |  |  | 	@
 | 
					
						
							|  |  |  | 	stmia	sp, {r0, lr}		@ save r0, lr
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	mrs	lr, spsr | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 	str	lr, [sp, #8]		@ save spsr
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	@
 | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 	@ Prepare for SVC32 mode.  IRQs remain disabled.
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	@
 | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 	mrs	r0, cpsr | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  | 	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE) | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 	msr	spsr_cxsf, r0 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 	@
 | 
					
						
							|  |  |  | 	@ the branch table must immediately follow this code
 | 
					
						
							|  |  |  | 	@
 | 
					
						
							|  |  |  | 	and	lr, lr, #0x0f | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  |  THUMB(	adr	r0, 1f			) | 
					
						
							|  |  |  |  THUMB(	ldr	lr, [r0, lr, lsl #2]	) | 
					
						
							| 
									
										
										
										
											2005-11-06 14:42:37 +00:00
										 |  |  | 	mov	r0, sp | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:54 +01:00
										 |  |  |  ARM(	ldr	lr, [pc, lr, lsl #2]	) | 
					
						
							| 
									
										
										
										
											2005-05-31 22:22:32 +01:00
										 |  |  | 	movs	pc, lr			@ branch to handler in SVC mode
 | 
					
						
							| 
									
										
										
										
											2008-08-28 11:22:32 +01:00
										 |  |  | ENDPROC(vector_\name) | 
					
						
							| 
									
										
										
										
											2009-07-24 12:32:52 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	.align	2
 | 
					
						
							|  |  |  | 	@ handler addresses follow this label
 | 
					
						
							|  |  |  | 1: | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 	.endm | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2013-07-04 12:03:31 +01:00
										 |  |  | 	.section .stubs, "ax", %progbits | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | __stubs_start: | 
					
						
							| 
									
										
										
										
											2013-07-04 11:40:32 +01:00
										 |  |  | 	@ This must be the first word
 | 
					
						
							|  |  |  | 	.word	vector_swi
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | vector_rst: | 
					
						
							|  |  |  |  ARM(	swi	SYS_ERROR0	) | 
					
						
							|  |  |  |  THUMB(	svc	#0		) | 
					
						
							|  |  |  |  THUMB(	nop			) | 
					
						
							|  |  |  | 	b	vector_und | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | /* | 
					
						
							|  |  |  |  * Interrupt dispatcher | 
					
						
							|  |  |  |  */ | 
					
						
							| 
									
										
										
										
											2005-11-06 14:42:37 +00:00
										 |  |  | 	vector_stub	irq, IRQ_MODE, 4 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	.long	__irq_usr			@  0  (USR_26 / USR_32)
 | 
					
						
							|  |  |  | 	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
 | 
					
						
							|  |  |  | 	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
 | 
					
						
							|  |  |  | 	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
 | 
					
						
							|  |  |  | 	.long	__irq_invalid			@  4
 | 
					
						
							|  |  |  | 	.long	__irq_invalid			@  5
 | 
					
						
							|  |  |  | 	.long	__irq_invalid			@  6
 | 
					
						
							|  |  |  | 	.long	__irq_invalid			@  7
 | 
					
						
							|  |  |  | 	.long	__irq_invalid			@  8
 | 
					
						
							|  |  |  | 	.long	__irq_invalid			@  9
 | 
					
						
							|  |  |  | 	.long	__irq_invalid			@  a
 | 
					
						
							|  |  |  | 	.long	__irq_invalid			@  b
 | 
					
						
							|  |  |  | 	.long	__irq_invalid			@  c
 | 
					
						
							|  |  |  | 	.long	__irq_invalid			@  d
 | 
					
						
							|  |  |  | 	.long	__irq_invalid			@  e
 | 
					
						
							|  |  |  | 	.long	__irq_invalid			@  f
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | /* | 
					
						
							|  |  |  |  * Data abort dispatcher | 
					
						
							|  |  |  |  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC | 
					
						
							|  |  |  |  */ | 
					
						
							| 
									
										
										
										
											2005-11-06 14:42:37 +00:00
										 |  |  | 	vector_stub	dabt, ABT_MODE, 8 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	.long	__dabt_usr			@  0  (USR_26 / USR_32)
 | 
					
						
							|  |  |  | 	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
 | 
					
						
							|  |  |  | 	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
 | 
					
						
							|  |  |  | 	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
 | 
					
						
							|  |  |  | 	.long	__dabt_invalid			@  4
 | 
					
						
							|  |  |  | 	.long	__dabt_invalid			@  5
 | 
					
						
							|  |  |  | 	.long	__dabt_invalid			@  6
 | 
					
						
							|  |  |  | 	.long	__dabt_invalid			@  7
 | 
					
						
							|  |  |  | 	.long	__dabt_invalid			@  8
 | 
					
						
							|  |  |  | 	.long	__dabt_invalid			@  9
 | 
					
						
							|  |  |  | 	.long	__dabt_invalid			@  a
 | 
					
						
							|  |  |  | 	.long	__dabt_invalid			@  b
 | 
					
						
							|  |  |  | 	.long	__dabt_invalid			@  c
 | 
					
						
							|  |  |  | 	.long	__dabt_invalid			@  d
 | 
					
						
							|  |  |  | 	.long	__dabt_invalid			@  e
 | 
					
						
							|  |  |  | 	.long	__dabt_invalid			@  f
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | /* | 
					
						
							|  |  |  |  * Prefetch abort dispatcher | 
					
						
							|  |  |  |  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC | 
					
						
							|  |  |  |  */ | 
					
						
							| 
									
										
										
										
											2005-11-06 14:42:37 +00:00
										 |  |  | 	vector_stub	pabt, ABT_MODE, 4 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	.long	__pabt_usr			@  0 (USR_26 / USR_32)
 | 
					
						
							|  |  |  | 	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
 | 
					
						
							|  |  |  | 	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
 | 
					
						
							|  |  |  | 	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
 | 
					
						
							|  |  |  | 	.long	__pabt_invalid			@  4
 | 
					
						
							|  |  |  | 	.long	__pabt_invalid			@  5
 | 
					
						
							|  |  |  | 	.long	__pabt_invalid			@  6
 | 
					
						
							|  |  |  | 	.long	__pabt_invalid			@  7
 | 
					
						
							|  |  |  | 	.long	__pabt_invalid			@  8
 | 
					
						
							|  |  |  | 	.long	__pabt_invalid			@  9
 | 
					
						
							|  |  |  | 	.long	__pabt_invalid			@  a
 | 
					
						
							|  |  |  | 	.long	__pabt_invalid			@  b
 | 
					
						
							|  |  |  | 	.long	__pabt_invalid			@  c
 | 
					
						
							|  |  |  | 	.long	__pabt_invalid			@  d
 | 
					
						
							|  |  |  | 	.long	__pabt_invalid			@  e
 | 
					
						
							|  |  |  | 	.long	__pabt_invalid			@  f
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | /* | 
					
						
							|  |  |  |  * Undef instr entry dispatcher | 
					
						
							|  |  |  |  * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC | 
					
						
							|  |  |  |  */ | 
					
						
							| 
									
										
										
										
											2005-11-06 14:42:37 +00:00
										 |  |  | 	vector_stub	und, UND_MODE | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	.long	__und_usr			@  0 (USR_26 / USR_32)
 | 
					
						
							|  |  |  | 	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
 | 
					
						
							|  |  |  | 	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
 | 
					
						
							|  |  |  | 	.long	__und_svc			@  3 (SVC_26 / SVC_32)
 | 
					
						
							|  |  |  | 	.long	__und_invalid			@  4
 | 
					
						
							|  |  |  | 	.long	__und_invalid			@  5
 | 
					
						
							|  |  |  | 	.long	__und_invalid			@  6
 | 
					
						
							|  |  |  | 	.long	__und_invalid			@  7
 | 
					
						
							|  |  |  | 	.long	__und_invalid			@  8
 | 
					
						
							|  |  |  | 	.long	__und_invalid			@  9
 | 
					
						
							|  |  |  | 	.long	__und_invalid			@  a
 | 
					
						
							|  |  |  | 	.long	__und_invalid			@  b
 | 
					
						
							|  |  |  | 	.long	__und_invalid			@  c
 | 
					
						
							|  |  |  | 	.long	__und_invalid			@  d
 | 
					
						
							|  |  |  | 	.long	__und_invalid			@  e
 | 
					
						
							|  |  |  | 	.long	__und_invalid			@  f
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	.align	5
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2013-07-04 11:40:32 +01:00
										 |  |  | /*============================================================================= | 
					
						
							|  |  |  |  * Address exception handler | 
					
						
							|  |  |  |  *----------------------------------------------------------------------------- | 
					
						
							|  |  |  |  * These aren't too critical. | 
					
						
							|  |  |  |  * (they're not supposed to happen, and won't happen in 32-bit data mode). | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | vector_addrexcptn: | 
					
						
							|  |  |  | 	b	vector_addrexcptn | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | /*============================================================================= | 
					
						
							|  |  |  |  * Undefined FIQs | 
					
						
							|  |  |  |  *----------------------------------------------------------------------------- | 
					
						
							|  |  |  |  * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC | 
					
						
							|  |  |  |  * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg. | 
					
						
							|  |  |  |  * Basically to switch modes, we *HAVE* to clobber one register...  brain | 
					
						
							|  |  |  |  * damage alert!  I don't think that we can execute any code in here in any | 
					
						
							|  |  |  |  * other mode than FIQ...  Ok you can switch to another mode, but you can't | 
					
						
							|  |  |  |  * get out of that mode without clobbering one register. | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | vector_fiq: | 
					
						
							|  |  |  | 	subs	pc, lr, #4 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2013-07-09 01:03:17 +01:00
										 |  |  | 	.globl	vector_fiq_offset
 | 
					
						
							|  |  |  | 	.equ	vector_fiq_offset, vector_fiq | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2013-07-04 12:03:31 +01:00
										 |  |  | 	.section .vectors, "ax", %progbits | 
					
						
							| 
									
										
										
										
											2005-04-26 15:17:42 +01:00
										 |  |  | __vectors_start: | 
					
						
							| 
									
										
										
										
											2013-07-04 12:03:31 +01:00
										 |  |  | 	W(b)	vector_rst | 
					
						
							|  |  |  | 	W(b)	vector_und | 
					
						
							|  |  |  | 	W(ldr)	pc, __vectors_start + 0x1000 | 
					
						
							|  |  |  | 	W(b)	vector_pabt | 
					
						
							|  |  |  | 	W(b)	vector_dabt | 
					
						
							|  |  |  | 	W(b)	vector_addrexcptn | 
					
						
							|  |  |  | 	W(b)	vector_irq | 
					
						
							|  |  |  | 	W(b)	vector_fiq | 
					
						
							| 
									
										
										
										
											2005-04-16 15:20:36 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	.data | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	.globl	cr_alignment
 | 
					
						
							|  |  |  | 	.globl	cr_no_alignment
 | 
					
						
							|  |  |  | cr_alignment: | 
					
						
							|  |  |  | 	.space	4
 | 
					
						
							|  |  |  | cr_no_alignment: | 
					
						
							|  |  |  | 	.space	4
 | 
					
						
							| 
									
										
										
										
											2010-12-13 09:42:34 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  | #ifdef CONFIG_MULTI_IRQ_HANDLER | 
					
						
							|  |  |  | 	.globl	handle_arch_irq
 | 
					
						
							|  |  |  | handle_arch_irq: | 
					
						
							|  |  |  | 	.space	4
 | 
					
						
							|  |  |  | #endif |