63bcff2a30
When Supervisor Mode Access Prevention (SMAP) is enabled, access to userspace from the kernel is controlled by the AC flag. To make the performance of manipulating that flag acceptable, there are two new instructions, STAC and CLAC, to set and clear it.

This patch adds those instructions, via alternative(), when the SMAP feature is enabled. It also adds X86_EFLAGS_AC unconditionally to the SYSCALL entry mask; there is simply no reason to make that one conditional.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Link: http://lkml.kernel.org/r/1348256595-29119-9-git-send-email-hpa@linux.intel.com
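For reference, the ASM_STAC and ASM_CLAC invocations in the file below are assembler macros supplied by the new <asm/smap.h>. A minimal sketch of the idea, assuming the altinstruction_entry helper from <asm/alternative-asm.h>, ASM_NOP3 from <asm/nops.h>, and hand-encoded opcodes (assemblers of that era do not know the STAC/CLAC mnemonics); the in-tree macro bodies may differ in detail:

/*
 * Sketch only: the patch site is a 3-byte NOP that the alternatives
 * framework replaces with STAC (0f 01 cb) when X86_FEATURE_SMAP is set
 * in the running CPU's feature bits.  ASM_CLAC is built the same way
 * around the CLAC encoding (0f 01 ca).
 */
#ifdef CONFIG_X86_SMAP
#define ASM_STAC						\
	661: ASM_NOP3 ;						\
	.pushsection .altinstr_replacement, "ax" ;		\
	662: .byte 0x0f, 0x01, 0xcb ;				\
	.popsection ;						\
	.pushsection .altinstructions, "a" ;			\
	altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
	.popsection
#else
#define ASM_STAC	/* SMAP not configured: expands to nothing */
#endif

On non-SMAP hardware (or with CONFIG_X86_SMAP off) each annotated site therefore costs at most a three-byte NOP, which is what makes adding STAC/CLAC around every user access acceptable.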
/*
 * __put_user functions.
 *
 * (C) Copyright 2005 Linus Torvalds
 * (C) Copyright 2005 Andi Kleen
 * (C) Copyright 2008 Glauber Costa
 *
 * These functions have a non-standard call interface
 * to make them more efficient, especially as they
 * return an error value in addition to the "real"
 * return value.
 */
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/asm.h>
#include <asm/smap.h>


/*
 * __put_user_X
 *
 * Inputs:	%eax[:%edx] contains the data
 *		%ecx contains the address
 *
 * Outputs:	%eax is error code (0 or -EFAULT)
 *
 * These functions should not modify any other registers,
 * as they get called from within inline assembly.
 */

#define ENTER	CFI_STARTPROC ; \
		GET_THREAD_INFO(%_ASM_BX)
#define EXIT	ASM_CLAC ;	\
		ret ;		\
		CFI_ENDPROC

.text
ENTRY(__put_user_1)
	ENTER
	cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
	jae bad_put_user
	ASM_STAC
1:	movb %al,(%_ASM_CX)
	xor %eax,%eax
	EXIT
ENDPROC(__put_user_1)

ENTRY(__put_user_2)
	ENTER
	mov TI_addr_limit(%_ASM_BX),%_ASM_BX
	sub $1,%_ASM_BX
	cmp %_ASM_BX,%_ASM_CX
	jae bad_put_user
	ASM_STAC
2:	movw %ax,(%_ASM_CX)
	xor %eax,%eax
	EXIT
ENDPROC(__put_user_2)

ENTRY(__put_user_4)
	ENTER
	mov TI_addr_limit(%_ASM_BX),%_ASM_BX
	sub $3,%_ASM_BX
	cmp %_ASM_BX,%_ASM_CX
	jae bad_put_user
	ASM_STAC
3:	movl %eax,(%_ASM_CX)
	xor %eax,%eax
	EXIT
ENDPROC(__put_user_4)

ENTRY(__put_user_8)
	ENTER
	mov TI_addr_limit(%_ASM_BX),%_ASM_BX
	sub $7,%_ASM_BX
	cmp %_ASM_BX,%_ASM_CX
	jae bad_put_user
	ASM_STAC
4:	mov %_ASM_AX,(%_ASM_CX)
#ifdef CONFIG_X86_32
5:	movl %edx,4(%_ASM_CX)
#endif
	xor %eax,%eax
	EXIT
ENDPROC(__put_user_8)

bad_put_user:
	CFI_STARTPROC
	movl $-EFAULT,%eax
	EXIT
END(bad_put_user)

	_ASM_EXTABLE(1b,bad_put_user)
	_ASM_EXTABLE(2b,bad_put_user)
	_ASM_EXTABLE(3b,bad_put_user)
	_ASM_EXTABLE(4b,bad_put_user)
#ifdef CONFIG_X86_32
	_ASM_EXTABLE(5b,bad_put_user)
#endif
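To illustrate the non-standard call interface documented at the top of the file, a hypothetical caller could look like the sketch below; the names my_kernel_value, my_user_ptr and .Lfault are made up for the example, and the real callers are the put_user()/__put_user() inline-asm wrappers in <asm/uaccess.h>. Data travels in %eax (plus %edx for the 8-byte case on 32-bit), the user address in %ecx, and on return %eax holds 0 or -EFAULT while everything except %eax, %ebx and the flags is preserved.

	/* Hypothetical 32-bit caller sketch: store a 32-bit value to a
	 * user pointer via __put_user_4 and branch on failure. */
	movl	my_kernel_value, %eax	/* value to store */
	movl	my_user_ptr, %ecx	/* userspace destination */
	call	__put_user_4
	testl	%eax, %eax		/* 0 on success, -EFAULT on fault */
	jnz	.Lfault

If the annotated store (label 3: above) faults, the _ASM_EXTABLE entry for it redirects execution to bad_put_user, so the caller sees -EFAULT rather than an oops.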